diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile new file mode 100644 index 000000000..b497446fc --- /dev/null +++ b/.buildkite/Dockerfile @@ -0,0 +1,14 @@ +ARG NODE_VERSION=${NODE_VERSION:-22} +FROM node:$NODE_VERSION + +# Install required tools +RUN apt-get clean -y && \ + apt-get -qy update && \ + apt-get -y install zip && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +WORKDIR /usr/src/app + +COPY package.json . +RUN npm install diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make new file mode 100644 index 000000000..4d0712c4d --- /dev/null +++ b/.buildkite/Dockerfile-make @@ -0,0 +1,27 @@ +ARG NODE_VERSION=${NODE_VERSION:-22} +FROM node:$NODE_VERSION + +ARG BUILDER_UID=1000 +ARG BUILDER_GID=1000 +ENV BUILDER_USER elastic +ENV BUILDER_GROUP elastic + +# install zip util +RUN apt-get clean -y && \ + apt-get update -y && \ + apt-get install -y zip + +# Set user permissions and directory +RUN (id -g ${BUILDER_GID} || groupadd --system -g ${BUILDER_GID} ${BUILDER_GROUP}) \ + && (id -u ${BUILDER_UID} || useradd --system --shell /bin/bash -u ${BUILDER_UID} -g ${BUILDER_GID} -m elastic) \ + && mkdir -p /usr/src/elasticsearch-js \ + && chown -R ${BUILDER_UID}:${BUILDER_GID} /usr/src/ + +WORKDIR /usr/src/elasticsearch-js + +# run remainder of commands as non-root user +USER ${BUILDER_UID}:${BUILDER_GID} + +# install dependencies +COPY package.json . +RUN npm install diff --git a/.ci/certs/ca.crt b/.buildkite/certs/ca.crt similarity index 100% rename from .ci/certs/ca.crt rename to .buildkite/certs/ca.crt diff --git a/.ci/certs/ca.key b/.buildkite/certs/ca.key similarity index 100% rename from .ci/certs/ca.key rename to .buildkite/certs/ca.key diff --git a/.buildkite/certs/testnode.crt b/.buildkite/certs/testnode.crt new file mode 100755 index 000000000..0a6e76430 --- /dev/null +++ b/.buildkite/certs/testnode.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDYjCCAkqgAwIBAgIVAIClHav09e9XGWJrnshywAjUHTnXMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTIzMDMyODE3MDIzOVoXDTI2MDMyNzE3MDIzOVowEzERMA8G +A1UEAxMIaW5zdGFuY2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV ++t5/g6u2r3awCtzqp17KG0hRxzkVoJoF8DYzVh+Rv9ymxQW0C/U8dQihAjkZHaIA +n49lSyNLkwWtmqQgPcimV4d6XuTYx2ahDixXYtjmoOSwH5dRtovKPCNKDPkUj9Vq +NwMW0uB1VxniMKI4DnYFqBgHL9kQKhQqvas6Gx0X6ptGRCLYCtVxeFcau6nnkZJt +urb+HNV5waOh0uTmsqnnslK3NjCQ/f030vPKxM5fOqOU5ajUHpZFJ6ZFmS32074H +l+mZoRT/GtbnVtIg+CJXsWThF3/L4iBImv+rkY9MKX5fyMLJgmIJG68S90IQGR8c +Z2lZYzC0J7zjMsYlODbDAgMBAAGjgYswgYgwHQYDVR0OBBYEFIDIcECn3AVHc3jk +MpQ4r7Kc3WCsMB8GA1UdIwQYMBaAFJYCWKn16g+acbing4Vl45QGUBs0MDsGA1Ud +EQQ0MDKCCWxvY2FsaG9zdIIIaW5zdGFuY2WHBH8AAAGHEAAAAAAAAAAAAAAAAAAA +AAGCA2VzMTAJBgNVHRMEAjAAMA0GCSqGSIb3DQEBCwUAA4IBAQBtX3RQ5ATpfORM +lrnhaUPGOWkjnb3p3BrdAWUaWoh136QhaXqxKiALQQhTtTerkXOcuquy9MmAyYvS +9fDdGvLCAO8pPCXjnzonCHerCLGdS7f/eqvSFWCdy7LPHzTAFYfVWVvbZed+83TL +bDY63AMwIexj34vJEStMapuFwWx05fstE8qZWIbYCL87sF5H/MRhzlz3ScAhQ1N7 +tODH7zvLzSxFGGEzCIKZ0iPFKbd3Y0wE6SptDSKhOqlnC8kkNeI2GjWsqVfHKsoF +pDFmri7IfOucuvalXJ6xiHPr9RDbuxEXs0u8mteT5nFQo7EaEGdHpg1pNGbfBOzP +lmj/dRS9 +-----END CERTIFICATE----- diff --git a/.buildkite/certs/testnode.key b/.buildkite/certs/testnode.key new file mode 100755 index 000000000..a9de563c8 --- /dev/null +++ b/.buildkite/certs/testnode.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAlfref4Ortq92sArc6qdeyhtIUcc5FaCaBfA2M1Yfkb/cpsUF +tAv1PHUIoQI5GR2iAJ+PZUsjS5MFrZqkID3IpleHel7k2MdmoQ4sV2LY5qDksB+X 
+UbaLyjwjSgz5FI/VajcDFtLgdVcZ4jCiOA52BagYBy/ZECoUKr2rOhsdF+qbRkQi +2ArVcXhXGrup55GSbbq2/hzVecGjodLk5rKp57JStzYwkP39N9LzysTOXzqjlOWo +1B6WRSemRZkt9tO+B5fpmaEU/xrW51bSIPgiV7Fk4Rd/y+IgSJr/q5GPTCl+X8jC +yYJiCRuvEvdCEBkfHGdpWWMwtCe84zLGJTg2wwIDAQABAoIBAAEP7HYNNnDWdYMD ++WAtYM12X/W5s/wUP94juaBI4u4iZH2EZodlixEdZUCTXgq43WsDUhxX05s7cE+p +H5DuSCHtoo2WHvGKAposwRDm2f3YVWQ2Xyb2ahNt69LYHHWrO+XQ60YYTa3r8Gn3 +7dFR3I016/jyn5DeEVaglvS1dfj2UG4ybR4KkMfcKd94X0rKvz3wzAhHIh+hwMtv +sVk7V4vSnKf2mJXwIVECTolnEJEkCjWjjymgUJYKT8yN7JnAsHRcvMa6kWwIGrLp +oQCEaJwYM6ynCRS989pLt3vA2iu5VkYhiHXJ9Ds/5b5yzhzmj+ymzKbFKrrUUrmn ++2Jp1K0CgYEAw8BchALsD/+JuoXjinA14MH7PZjIsXyhtPk+c4pk42iMNyg1J8XF +Y/ITepLYsl2bZqQI1jOJdDqsTwIsva9r749lsmkYI3VOxhi7+qBK0sThR66C87lX +iU2QpnZ9NloC6ort4a3MEvZ/gRQcXdBrNlNoza2p7PHAVDTnsdSrNKUCgYEAxCQV +uo85oZyfnMufn/gcI9IeYOgiB0tO3a8cAFX2wQW1y935t6Z13ApUQc4EnCOH7ZBc +td5kT+xGdRWnfPZ38FM1dd5MBdGE69s3q8pJDUExSgNLqaF6/5bD32qui66L3ugu +eMjxrzqJsc2uQTPCs18SGsyRmf54DpY8HglOmUcCgYAGRDgx+a347SNJl1OrcOAo +q80RMbzrAaRjmL8JD9se9I/YjC73cPtasbsx51WMkDaTWJj30nqJ//7YIKeyAtWf +u6Vzyq19JRo6eTw7T7pVePwFQW7rwnks6hDBY3WqscL6IyxuVxP7X2zBgxVNY4ir +Gox2WSLhdPPFPlRUewxoCQKBgAJvqE1u5fpZ5ame5dao0ECppXLyrymEB/C88g4X +Az+WgJGNqkJbsO8QuccvdeMylcefmWcw4fIULzPZFwF4VjkH74wNPMh9t7buPBzI +IGwnuSMAM3ph5RMzni8yNgTKIDaej6U0abwRcBBjS5zHtc1giusGS3CsNnWH7Cs7 +VlyVAoGBAK+prq9t9x3tC3NfCZH8/Wfs/X0T1qm11RiL5+tOhmbguWAqSSBy8OjX +Yh8AOXrFuMGldcaTXxMeiKvI2cyybnls1MFsPoeV/fSMJbex7whdeJeTi66NOSKr +oftUHvkHS0Vv/LicMEOufFGslb4T9aPJ7oyhoSlz9CfAutDWk/q/ +-----END RSA PRIVATE KEY----- diff --git a/.ci/certs/testnode_san.crt b/.buildkite/certs/testnode_san.crt similarity index 100% rename from .ci/certs/testnode_san.crt rename to .buildkite/certs/testnode_san.crt diff --git a/.ci/certs/testnode_san.key b/.buildkite/certs/testnode_san.key similarity index 100% rename from .ci/certs/testnode_san.key rename to .buildkite/certs/testnode_san.key diff --git a/.ci/functions/cleanup.sh b/.buildkite/functions/cleanup.sh old mode 100644 new mode 100755 similarity index 96% rename from .ci/functions/cleanup.sh rename to .buildkite/functions/cleanup.sh index 4c25166fb..98dfe4e14 --- a/.ci/functions/cleanup.sh +++ b/.buildkite/functions/cleanup.sh @@ -2,7 +2,7 @@ # # Shared cleanup routines between different steps # -# Please source .ci/functions/imports.sh as a whole not just this file +# Please source .buildkite/functions/imports.sh as a whole not just this file # # Version 1.0.0 # - Initial version after refactor diff --git a/.ci/functions/imports.sh b/.buildkite/functions/imports.sh old mode 100644 new mode 100755 similarity index 85% rename from .ci/functions/imports.sh rename to .buildkite/functions/imports.sh index e022a3be9..c05f36826 --- a/.ci/functions/imports.sh +++ b/.buildkite/functions/imports.sh @@ -18,7 +18,7 @@ require_stack_version if [[ -z $es_node_name ]]; then # only set these once set -euo pipefail - export TEST_SUITE=${TEST_SUITE-oss} + export TEST_SUITE=${TEST_SUITE-free} export RUNSCRIPTS=${RUNSCRIPTS-} export DETACH=${DETACH-false} export CLEANUP=${CLEANUP-false} @@ -26,11 +26,11 @@ if [[ -z $es_node_name ]]; then export es_node_name=instance export elastic_password=changeme export elasticsearch_image=elasticsearch - export elasticsearch_url=https://elastic:${elastic_password}@${es_node_name}:9200 - if [[ $TEST_SUITE != "xpack" ]]; then - export elasticsearch_image=elasticsearch-${TEST_SUITE} - export elasticsearch_url=http://${es_node_name}:9200 + export elasticsearch_scheme="https" + if [[ $TEST_SUITE != "platinum" ]]; then + export 
elasticsearch_scheme="http"
   fi
+  export elasticsearch_url=${elasticsearch_scheme}://elastic:${elastic_password}@${es_node_name}:9200
   export external_elasticsearch_url=${elasticsearch_url/$es_node_name/localhost}
   export elasticsearch_container="${elasticsearch_image}:${STACK_VERSION}"
diff --git a/.ci/functions/wait-for-container.sh b/.buildkite/functions/wait-for-container.sh
old mode 100644
new mode 100755
similarity index 94%
rename from .ci/functions/wait-for-container.sh
rename to .buildkite/functions/wait-for-container.sh
index 1a721b588..bbbf4ea63
--- a/.ci/functions/wait-for-container.sh
+++ b/.buildkite/functions/wait-for-container.sh
@@ -2,7 +2,7 @@
 #
 # Exposes a routine scripts can call to wait for a container if that container set up a health command
 #
-# Please source .ci/functions/imports.sh as a whole not just this file
+# Please source .buildkite/functions/imports.sh as a whole not just this file
 #
 # Version 1.0.1
 # - Initial version after refactor
diff --git a/.buildkite/make.mjs b/.buildkite/make.mjs
new file mode 100644
index 000000000..ddc91d01f
--- /dev/null
+++ b/.buildkite/make.mjs
@@ -0,0 +1,126 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/* global $ argv */
+
+'use strict'
+
+import 'zx/globals'
+
+import { readFile, writeFile } from 'fs/promises'
+import assert from 'assert'
+import { join } from 'desm'
+import semver from 'semver'
+
+// zx/globals loads minimist-parsed args as a global `argv`, but it
+// interprets args like '8.10' as numbers and shortens them to '8.1'.
+// So we have to import and configure minimist ourselves.
+import minimist from 'minimist'
+const argv = minimist(process.argv.slice(2), { string: ['_', 'task'] })
+assert(typeof argv.task === 'string', 'Missing task parameter')
+
+switch (argv.task) {
+  case 'release':
+    release(argv._).catch(onError)
+    break
+  case 'bump':
+    bump(argv._).catch(onError)
+    break
+  case 'codegen':
+    codegen(argv._).catch(onError)
+    break
+  default:
+    console.log(`Unknown task: ${argv.task}`)
+    process.exit(1)
+}
+
+async function release (args) {
+  assert(args.length === 2, 'Release task expects two parameters')
+  let [version, outputFolder] = args
+
+  if (process.env.WORKFLOW === 'snapshot' && !version.endsWith('SNAPSHOT')) {
+    version = `${version}-SNAPSHOT`
+  }
+
+  await bump([version])
+
+  const packageJson = JSON.parse(await readFile(
+    join(import.meta.url, '..', 'package.json'),
+    'utf8'
+  ))
+
+  await $`npm run build`
+  await $`npm pack`
+  await $`zip elasticsearch-js-${version}.zip elastic-elasticsearch-${packageJson.version}.tgz`
+  await $`rm elastic-elasticsearch-${packageJson.version}.tgz`
+  await $`mv ${join(import.meta.url, '..', `elasticsearch-js-${version}.zip`)} ${join(import.meta.url, '..', outputFolder, `elasticsearch-js-${version}.zip`)}`
+}
+
+async function bump (args) {
+  assert(args.length === 1, 'Bump task expects one parameter')
+  let [version] = args
+  const packageJson = JSON.parse(await readFile(
+    join(import.meta.url, '..', 'package.json'),
+    'utf8'
+  ))
+
+  if (version.split('.').length === 2) version = `${version}.0`
+  const cleanVersion = semver.clean(version.includes('SNAPSHOT') ? version.split('-')[0] : version)
+  assert(semver.valid(cleanVersion), `${cleanVersion} is not seen as a valid semver version. 
raw version: ${version}`) + packageJson.version = cleanVersion + packageJson.versionCanary = `${cleanVersion}-canary.0` + + await writeFile( + join(import.meta.url, '..', 'package.json'), + JSON.stringify(packageJson, null, 2), + 'utf8' + ) + + const pipeline = await readFile(join(import.meta.url, '..', '.buildkite', 'pipeline.yml'), 'utf8') + await writeFile( + join(import.meta.url, '..', '.buildkite', 'pipeline.yml'), + pipeline.replace(/STACK_VERSION: [0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?/, `STACK_VERSION: ${cleanVersion}`), + 'utf8' + ) +} + +// this command can only be executed locally for now +async function codegen (args) { + assert(args.length === 1, 'Codegen task expects one parameter') + const version = args[0].toString() + + const clientGeneratorPath = join(import.meta.url, '..', '..', 'elastic-client-generator-js') + const isGeneratorCloned = await $`[[ -d ${clientGeneratorPath} ]]`.exitCode === 0 + assert(isGeneratorCloned, 'You must clone the elastic-client-generator-js first') + + await $`npm install --prefix ${clientGeneratorPath}` + + // generate elasticsearch client. this command will take a while! + if (version === 'main') { + await $`npm run elasticsearch --prefix ${clientGeneratorPath} -- --version main` + } else { + await $`npm run elasticsearch --prefix ${clientGeneratorPath} -- --version ${version.split('.').slice(0, 2).join('.')}` + } + // clean up fixable linter issues + await $`npm run fix --prefix ${clientGeneratorPath}` + + await $`rm -rf ${join(import.meta.url, '..', 'src', 'api')}` + await $`mkdir ${join(import.meta.url, '..', 'src', 'api')}` + await $`cp -R ${join(import.meta.url, '..', '..', 'elastic-client-generator-js', 'output')}/* ${join(import.meta.url, '..', 'src', 'api')}` + await $`mv ${join(import.meta.url, '..', 'src', 'api', 'reference.md')} ${join(import.meta.url, '..', 'docs', 'reference', 'api-reference.md')}` + await $`npm run build` + + // run docs example generation + if (version === 'main') { + await $`node ./scripts/generate-docs-examples.js` + } else { + await $`node ./scripts/generate-docs-examples.js ${version.split('.').slice(0, 2).join('.')}` + } +} + +function onError (err) { + console.log(err) + process.exit(1) +} diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml new file mode 100644 index 000000000..e664f03d9 --- /dev/null +++ b/.buildkite/pipeline.yml @@ -0,0 +1,32 @@ +--- +agents: + provider: "gcp" + image: family/core-ubuntu-2204 + memory: "8G" + cpu: "2" + +steps: + - label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }})" + env: + NODE_VERSION: "{{ matrix.nodejs }}" + TEST_SUITE: "platinum" + STACK_VERSION: 9.1.0 + GITHUB_TOKEN_PATH: "secret/ci/elastic-elasticsearch-js/github-token" + TEST_ES_STACK: "1" + matrix: + setup: + nodejs: + - "20" + - "22" + - "24" + command: ./.buildkite/run-tests.sh + artifact_paths: "./junit-output/junit-*.xml" + - wait: ~ + continue_on_failure: true + - label: ":junit: Test results" + plugins: + - junit-annotate#v2.7.0: + artifacts: "junit-output/junit-*.xml" + job-uuid-file-pattern: "junit-(.*).xml" + fail-build-on-error: true + failure-format: file diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json new file mode 100644 index 000000000..59c46cd87 --- /dev/null +++ b/.buildkite/pull-requests.json @@ -0,0 +1,19 @@ +{ + "jobs": [ + { + "enabled": true, + "pipeline_slug": "elasticsearch-js-integration-tests", + "allowed_repo_permissions": ["admin", "write"], + "build_on_commit": true, + "skip_ci_on_only_changed": [ + "\\.md$", + "\\.asciidoc$", + 
"^docs\\/", + "^scripts\\/", + "^catalog-info\\.yaml$", + "^test\\/unit\\/", + "^\\.github\\/" + ] + } + ] +} diff --git a/.buildkite/run-client.sh b/.buildkite/run-client.sh new file mode 100755 index 000000000..f210219a2 --- /dev/null +++ b/.buildkite/run-client.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# +# Once called Elasticsearch should be up and running +# +script_path=$(dirname "$(realpath -s "$0")") +set -euo pipefail +repo=$(pwd) + +export NODE_VERSION=${NODE_VERSION:-20} + +echo "--- :javascript: Building Docker image" +docker build \ + --file "$script_path/Dockerfile" \ + --tag elastic/elasticsearch-js \ + --build-arg NODE_VERSION="$NODE_VERSION" \ + . + +GITHUB_TOKEN=$(vault read -field=token "$GITHUB_TOKEN_PATH") +export GITHUB_TOKEN + +echo "--- :javascript: Running tests" +mkdir -p "$repo/junit-output" +docker run \ + --network="${network_name}" \ + --env TEST_ES_STACK \ + --env STACK_VERSION \ + --env GITHUB_TOKEN \ + --env "TEST_ES_SERVER=${elasticsearch_url}" \ + --env "ELASTIC_PASSWORD=${elastic_password}" \ + --env "ELASTIC_USER=elastic" \ + --env "BUILDKITE=true" \ + --volume "/usr/src/app/node_modules" \ + --volume "$repo:/usr/src/app" \ + --volume "$repo/junit-output:/junit-output" \ + --name elasticsearch-js \ + --rm \ + elastic/elasticsearch-js \ + bash -c "npm run test:integration; [ -f ./report-junit.xml ] && mv ./report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'" diff --git a/.buildkite/run-elasticsearch.sh b/.buildkite/run-elasticsearch.sh new file mode 100755 index 000000000..d9e0c6fd3 --- /dev/null +++ b/.buildkite/run-elasticsearch.sh @@ -0,0 +1,155 @@ +#!/usr/bin/env bash +# +# Launch one or more Elasticsearch nodes via the Docker image, +# to form a cluster suitable for running the REST API tests. +# +# Export the STACK_VERSION variable, eg. '8.0.0-SNAPSHOT'. +# Export the TEST_SUITE variable, eg. 'free' or 'platinum' defaults to 'free'. +# Export the NUMBER_OF_NODES variable to start more than 1 node + +# Version 1.6.1 +# - Initial version of the run-elasticsearch.sh script +# - Deleting the volume should not dependent on the container still running +# - Fixed `ES_JAVA_OPTS` config +# - Moved to STACK_VERSION and TEST_VERSION +# - Refactored into functions and imports +# - Support NUMBER_OF_NODES +# - Added 5 retries on docker pull for fixing transient network errors +# - Added flags to make local CCR configurations work +# - Added action.destructive_requires_name=false as the default will be true in v8 +# - Added ingest.geoip.downloader.enabled=false as it causes false positives in testing +# - Moved ELASTIC_PASSWORD and xpack.security.enabled to the base arguments for "Security On by default" +# - Use https only when TEST_SUITE is "platinum", when "free" use http +# - Set xpack.security.enabled=false for "free" and xpack.security.enabled=true for "platinum" + +script_path=$(dirname $(realpath -s $0)) +source $script_path/functions/imports.sh +set -euo pipefail + +echo -e "\033[34;1mINFO:\033[0m Take down node if called twice with the same arguments (DETACH=true) or on separate terminals \033[0m" +cleanup_node $es_node_name + +master_node_name=${es_node_name} +cluster_name=${moniker}${suffix} + +# Set vm.max_map_count kernel setting to 262144 +if [ "$(sysctl vm.max_map_count)" != 'vm.max_map_count = 262144' ]; then + echo "vm.max_map_count may be too low. resetting." 
+ sudo sysctl -w vm.max_map_count=262144 +fi + +declare -a volumes +environment=($(cat <<-END + --env ELASTIC_PASSWORD=$elastic_password + --env node.name=$es_node_name + --env cluster.name=$cluster_name + --env cluster.initial_master_nodes=$master_node_name + --env discovery.seed_hosts=$master_node_name + --env cluster.routing.allocation.disk.threshold_enabled=false + --env bootstrap.memory_lock=true + --env node.attr.testattr=test + --env path.repo=/tmp + --env repositories.url.allowed_urls=http://snapshot.test* + --env action.destructive_requires_name=false + --env ingest.geoip.downloader.enabled=false + --env cluster.deprecation_indexing.enabled=false +END +)) +if [[ "$TEST_SUITE" == "platinum" ]]; then + environment+=($(cat <<-END + --env xpack.security.enabled=true + --env xpack.license.self_generated.type=trial + --env xpack.security.http.ssl.enabled=true + --env xpack.security.http.ssl.verification_mode=certificate + --env xpack.security.http.ssl.key=certs/testnode.key + --env xpack.security.http.ssl.certificate=certs/testnode.crt + --env xpack.security.http.ssl.certificate_authorities=certs/ca.crt + --env xpack.security.transport.ssl.enabled=true + --env xpack.security.transport.ssl.verification_mode=certificate + --env xpack.security.transport.ssl.key=certs/testnode.key + --env xpack.security.transport.ssl.certificate=certs/testnode.crt + --env xpack.security.transport.ssl.certificate_authorities=certs/ca.crt +END +)) + volumes+=($(cat <<-END + --volume $ssl_cert:/usr/share/elasticsearch/config/certs/testnode.crt + --volume $ssl_key:/usr/share/elasticsearch/config/certs/testnode.key + --volume $ssl_ca:/usr/share/elasticsearch/config/certs/ca.crt +END +)) +else + environment+=($(cat <<-END + --env node.roles=data,data_cold,data_content,data_frozen,data_hot,data_warm,ingest,master,ml,remote_cluster_client,transform + --env xpack.security.enabled=false + --env xpack.security.http.ssl.enabled=false +END +)) +fi + +cert_validation_flags="" +if [[ "$TEST_SUITE" == "platinum" ]]; then + cert_validation_flags="--insecure --cacert /usr/share/elasticsearch/config/certs/ca.crt --resolve ${es_node_name}:443:127.0.0.1" +fi + +echo "--- :elasticsearch: Environment setup" +echo "TEST_SUITE: $TEST_SUITE" +echo "Elasticsearch URL: $elasticsearch_url" +echo "Elasticsearch External URL: $external_elasticsearch_url" + + +echo "--- :elasticsearch: Running container" +# Pull the container, retry on failures up to 5 times with +# short delays between each attempt. Fixes most transient network errors. +docker_pull_attempts=0 +until [ "$docker_pull_attempts" -ge 5 ] +do + docker pull docker.elastic.co/elasticsearch/"$elasticsearch_container" && break + docker_pull_attempts=$((docker_pull_attempts+1)) + echo "Failed to pull image, retrying in 10 seconds (retry $docker_pull_attempts/5)..." 
+ sleep 10 +done + +NUMBER_OF_NODES=${NUMBER_OF_NODES-1} +http_port=9200 +for (( i=0; i<$NUMBER_OF_NODES; i++, http_port++ )); do + node_name=${es_node_name}$i + node_url=${external_elasticsearch_url/9200/${http_port}}$i + if [[ "$i" == "0" ]]; then node_name=$es_node_name; fi + environment+=($(cat <<-END + --env node.name=$node_name +END +)) + echo "$i: $http_port $node_url " + volume_name=${node_name}-${suffix}-data + volumes+=($(cat <<-END + --volume $volume_name:/usr/share/elasticsearch/data${i} +END +)) + + # make sure we detach for all but the last node if DETACH=false (default) so all nodes are started + local_detach="true" + if [[ "$i" == "$((NUMBER_OF_NODES-1))" ]]; then local_detach=$DETACH; fi + echo -e "\033[34;1mINFO:\033[0m Starting container $node_name \033[0m" + set -x + docker run \ + --name "$node_name" \ + --network "$network_name" \ + --env "ES_JAVA_OPTS=-Xms1g -Xmx1g -da:org.elasticsearch.xpack.ccr.index.engine.FollowingEngineAssertions" \ + "${environment[@]}" \ + "${volumes[@]}" \ + --publish "$http_port":9200 \ + --ulimit nofile=65536:65536 \ + --ulimit memlock=-1:-1 \ + --detach="$local_detach" \ + --health-cmd="curl $cert_validation_flags --fail $elasticsearch_url/_cluster/health || exit 1" \ + --health-interval=2s \ + --health-retries=20 \ + --health-timeout=2s \ + --rm \ + docker.elastic.co/elasticsearch/"$elasticsearch_container"; + + set +x + if wait_for_container "$es_node_name" "$network_name"; then + echo -e "\033[32;1mSUCCESS:\033[0m Running on: $node_url\033[0m" + fi +done diff --git a/.buildkite/run-tests.sh b/.buildkite/run-tests.sh new file mode 100755 index 000000000..d9aa181af --- /dev/null +++ b/.buildkite/run-tests.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# Script to run Elasticsearch container and Elasticsearch client integration tests on Buildkite +# +# Version 0.1 +# +script_path=$(dirname "$(realpath -s "$0")") +source "$script_path/functions/imports.sh" + +set -euo pipefail + +echo "--- :elasticsearch: Starting Elasticsearch" +DETACH=true bash "$script_path/run-elasticsearch.sh" + +echo "+++ :javascript: Run Client" +bash "$script_path/run-client.sh" diff --git a/.ci/Dockerfile b/.ci/Dockerfile deleted file mode 100644 index b25ea6f66..000000000 --- a/.ci/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -ARG NODE_JS_VERSION=10 -FROM node:${NODE_JS_VERSION} - -# Create app directory -WORKDIR /usr/src/app - -# Install app dependencies -COPY package*.json ./ -RUN npm install - -COPY . . 
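Taken together, `run-tests.sh`, `run-elasticsearch.sh`, and `run-client.sh` above replace the Jenkins-era `.ci` entry points deleted below. A minimal local invocation might look like the following sketch; it assumes Docker is available and a logged-in Vault CLI (`run-client.sh` reads the GitHub token from Vault), and the example values mirror the `pipeline.yml` defaults:

```bash
# Sketch: running the Buildkite integration suite locally.
export STACK_VERSION=9.1.0   # Elasticsearch version under test
export TEST_SUITE=platinum   # 'platinum' enables security/TLS; 'free' uses plain http
export NODE_VERSION=22       # Node.js version baked into the client test image
export GITHUB_TOKEN_PATH="secret/ci/elastic-elasticsearch-js/github-token"

./.buildkite/run-tests.sh    # starts Elasticsearch detached, then runs the client tests
```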
diff --git a/.ci/certs/testnode.crt b/.ci/certs/testnode.crt deleted file mode 100755 index a49dfd775..000000000 --- a/.ci/certs/testnode.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDIzCCAgugAwIBAgIVAMTO6uVx9dLox2t0lY4IcBKZXb5WMA0GCSqGSIb3DQEB -CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu -ZXJhdGVkIENBMB4XDTIwMDIyNjA1NTA1OVoXDTIzMDIyNTA1NTA1OVowEzERMA8G -A1UEAxMIaW5zdGFuY2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDK -YLTOikVENiN/qYupOsoXd7VYYnryyfCC/dK4FC2aozkbqjFzBdvPGAasoc4yEiH5 -CGeXMgJuOjk1maqetmdIsw00j4oHJviYsnGXzxxS5swhD7spcW4Uk4V4tAUzrbfT -vW/2WW/yYCLe5phVb2chz0jL+WYb4bBmdfs/t6RtP9RqsplYAmVp3gZ6lt2YNtvE -k9gz0TVk3DuO1TquIClfRYUjuywS6xDSvxJ8Jl91EfDWM8QU+9F+YAtiv74xl2U3 -P0wwMqNvMxf9/3ak3lTQGsgO4L6cwbKpVLMMzxSVunZz/sgl19xy3qHHz1Qr2MjJ -/2c2J7vahUL4NPRkjJClAgMBAAGjTTBLMB0GA1UdDgQWBBS2Wn8E2VZv4oenY+pR -O8G3zfQXhzAfBgNVHSMEGDAWgBSWAlip9eoPmnG4p4OFZeOUBlAbNDAJBgNVHRME -AjAAMA0GCSqGSIb3DQEBCwUAA4IBAQAvwPvCiJJ6v9jYcyvYY8I3gP0oCwrylpRL -n91UlgRSHUmuAObyOoVN5518gSV/bTU2SDrstcLkLFxHvnfpoGJoxsQEHuGxwDRI -nhYNd62EKLerehNM/F9ILKmvTh8f6QPCzjUuExTXv+63l2Sr6dBS7FHsGs6UKUYO -llM/y9wMZ1LCuZuBg9RhtgpFXRSgDM9Z7Begu0d/BPX9od/qAeZg9Arz4rwUiCN4 -IJOMEBEPi5q1tgeS0Fb1Grpqd0Uz5tZKtEHNKzLG+zSMmkneL62Nk2HsmEFZKwzg -u2pU42UaUE596G6o78s1aLn9ICcElPHTjiuZNSiyuu9IzvFDjGQw ------END CERTIFICATE----- diff --git a/.ci/certs/testnode.key b/.ci/certs/testnode.key deleted file mode 100755 index 82efeecb9..000000000 --- a/.ci/certs/testnode.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAymC0zopFRDYjf6mLqTrKF3e1WGJ68snwgv3SuBQtmqM5G6ox -cwXbzxgGrKHOMhIh+QhnlzICbjo5NZmqnrZnSLMNNI+KByb4mLJxl88cUubMIQ+7 -KXFuFJOFeLQFM623071v9llv8mAi3uaYVW9nIc9Iy/lmG+GwZnX7P7ekbT/UarKZ -WAJlad4GepbdmDbbxJPYM9E1ZNw7jtU6riApX0WFI7ssEusQ0r8SfCZfdRHw1jPE -FPvRfmALYr++MZdlNz9MMDKjbzMX/f92pN5U0BrIDuC+nMGyqVSzDM8Ulbp2c/7I -Jdfcct6hx89UK9jIyf9nNie72oVC+DT0ZIyQpQIDAQABAoIBADAh7f7NjgnaInlD -ds8KB3SraPsbeQhzlPtiqRJU4j/MIFH/GYG03AGWQkget67a9y+GmzSvlTpoKKEh -6h2TXl9BDpv4o6ht0WRn1HJ5tM/Wyqf2WNpTew3zxCPgFPikkXsPrChYPzLTQJfp -GkP/mfTFmxfAOlPZSp4j41zVLYs53eDkAegFPVfKSr1XNNJ3QODLPcIBfxBYsiC9 -oU+jRW8xYuj31cEl5k5UqrChJ1rm3mt6cguqXKbISuoSvi13gXI6DccqhuLAU+Kr -ib2XYrRP+pWocZo/pM9WUVoNGtFxfY88sAQtvG6gDKo2AURtFyq84Ow0h9mdixV/ -gRIDPcECgYEA5nEqE3OKuG9WuUFGXvjtn4C0F6JjflYWh7AbX51S4F6LKrW6/XHL -Rg4BtF+XReT7OQ6llsV8kZeUxsUckkgDLzSaA8lysNDV5KkhAWHfRqH//QKFbqZi -JL9t3x63Qt81US8s2hQk3khPYTRM8ZB3xHiXvZYSGC/0x/DxfEO3QJECgYEA4NK5 -sxtrat8sFz6SK9nWEKimPjDVzxJ0hxdX4tRq/JdOO5RncawVqt6TNP9gTuxfBvhW -MhJYEsQj8iUoL1dxo9d1eP8HEANNV0iX5OBvJNmgBp+2OyRSyr+PA55+wAxYuAE7 -QKaitOjW57fpArNRt2hQyiSzTuqUFRWTWJHCWNUCgYAEurPTXF6vdFGCUc2g61jt -GhYYGhQSpq+lrz6Qksj9o9MVWE9zHh++21C7o+6V16I0RJGva3QoBMVf4vG4KtQt -5tV2WG8LI+4P2Ey+G4UajP6U8bVNVQrUmD0oBBhcvfn5JY+1Fg6/pRpD82/U0VMz -7AmpMWhDqNBMPiymkTk0kQKBgCuWb05cSI0ly4SOKwS5bRk5uVFhYnKNH255hh6C -FGP4acB/WzbcqC7CjEPAJ0nl5d6SExQOHmk1AcsWjR3wlCWxxiK5PwNJwJrlhh1n -reS1FKN0H36D4lFQpkeLWQOe4Sx7gKNeKzlr0w6Fx3Uwku0+Gju2tdTdAey8jB6l -08opAoGAEe1AuR/OFp2xw6V8TH9UHkkpGxy+OrXI6PX6tgk29PgB+uiMu4RwbjVz -1di1KKq2XecAilVbnyqY+edADxYGbSnci9x5wQRIebfMi3VXKtV8NQBv2as6qwtW -JDcQUWotOHjpdvmfJWWkcBhbAKrgX8ukww00ZI/lC3/rmkGnBBg= ------END RSA PRIVATE KEY----- diff --git a/.ci/docker/Dockerfile b/.ci/docker/Dockerfile deleted file mode 100644 index 2f37234ae..000000000 --- a/.ci/docker/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -ARG NODE_JS_VERSION=10 -FROM node:${NODE_JS_VERSION}-alpine - -RUN apk --no-cache add git - -# Create app directory -WORKDIR /usr/src/app diff --git 
a/.ci/jobs/defaults.yml b/.ci/jobs/defaults.yml deleted file mode 100644 index 0e035d538..000000000 --- a/.ci/jobs/defaults.yml +++ /dev/null @@ -1,81 +0,0 @@ ---- - -##### GLOBAL METADATA - -- meta: - cluster: clients-ci - -##### JOB DEFAULTS - -- job: - project-type: matrix - logrotate: - daysToKeep: 30 - numToKeep: 100 - parameters: - - string: - name: branch_specifier - default: refs/heads/master - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - properties: - - github: - url: https://github.com/elastic/elasticsearch-js/ - - inject: - properties-content: HOME=$JENKINS_HOME - concurrent: true - node: flyweight - scm: - - git: - name: origin - credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba - reference-repo: /var/lib/jenkins/.git-references/elasticsearch-js.git - branches: - - ${branch_specifier} - url: https://github.com/elastic/elasticsearch-js.git - basedir: '' - wipe-workspace: 'True' - triggers: - - github - vault: - # vault read auth/approle/role/clients-ci/role-id - role_id: ddbd0d44-0e51-105b-177a-c8fdfd445126 - axes: - - axis: - type: slave - name: label - values: - - linux - - axis: - type: yaml - filename: .ci/test-matrix.yml - name: STACK_VERSION - - axis: - type: yaml - filename: .ci/test-matrix.yml - name: NODE_JS_VERSION - - axis: - type: yaml - filename: .ci/test-matrix.yml - name: TEST_SUITE - yaml-strategy: - exclude-key: exclude - filename: .ci/test-matrix.yml - wrappers: - - ansicolor - - timeout: - type: absolute - timeout: 120 - fail: true - - timestamps - - workspace-cleanup - builders: - - shell: |- - #!/usr/local/bin/runbld - .ci/run-tests - publishers: - - email: - recipients: infra-root+build@elastic.co - - junit: - results: "**/*-junit.xml" - allow-empty-results: true diff --git a/.ci/jobs/elastic+elasticsearch-js+5.x.yml b/.ci/jobs/elastic+elasticsearch-js+5.x.yml deleted file mode 100644 index a2f6e51f9..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+5.x.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+5.x - display-name: 'elastic / elasticsearch-js # 5.x' - description: Testing the elasticsearch-js 5.x branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/5.x - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: '@weekly' diff --git a/.ci/jobs/elastic+elasticsearch-js+6.x.yml b/.ci/jobs/elastic+elasticsearch-js+6.x.yml deleted file mode 100644 index 6990d1c9b..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+6.x.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+6.x - display-name: 'elastic / elasticsearch-js # 6.x' - description: Testing the elasticsearch-js 6.x branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/6.x - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: '@weekly' diff --git a/.ci/jobs/elastic+elasticsearch-js+7.8.yml b/.ci/jobs/elastic+elasticsearch-js+7.8.yml deleted file mode 100644 index b11b25f37..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+7.8.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+7.8 - display-name: 'elastic / elasticsearch-js # 7.8' - description: Testing the elasticsearch-js 7.8 branch. 
- junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/7.8 - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: '@daily' diff --git a/.ci/jobs/elastic+elasticsearch-js+7.x.yml b/.ci/jobs/elastic+elasticsearch-js+7.x.yml deleted file mode 100644 index 27a121d32..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+7.x.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+7.x - display-name: 'elastic / elasticsearch-js # 7.x' - description: Testing the elasticsearch-js 7.x branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/7.x - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: '@daily' diff --git a/.ci/jobs/elastic+elasticsearch-js+master.yml b/.ci/jobs/elastic+elasticsearch-js+master.yml deleted file mode 100644 index 79a5929a2..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+master.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+master - display-name: 'elastic / elasticsearch-js # master' - description: Testing the elasticsearch-js master branch. - junit_results: "*-junit.xml" - parameters: - - string: - name: branch_specifier - default: refs/heads/master - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) - triggers: - - github - - timed: '@daily' diff --git a/.ci/jobs/elastic+elasticsearch-js+pull-request.yml b/.ci/jobs/elastic+elasticsearch-js+pull-request.yml deleted file mode 100644 index 722dfc62c..000000000 --- a/.ci/jobs/elastic+elasticsearch-js+pull-request.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- job: - name: elastic+elasticsearch-js+pull-request - display-name: 'elastic / elasticsearch-js # pull-request' - description: Testing of elasticsearch-js pull requests. - junit_results: "*-junit.xml" - scm: - - git: - branches: - - ${ghprbActualCommit} - refspec: +refs/pull/*:refs/remotes/origin/pr/* - triggers: - - github-pull-request: - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - github-hooks: true - status-context: clients-ci - cancel-builds-on-update: true diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh deleted file mode 100644 index 41f3c12db..000000000 --- a/.ci/packer_cache.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -source /usr/local/bin/bash_standard_lib.sh - -DOCKER_IMAGES="node:14-alpine -node:12-alpine -node:10-alpine -node:8-alpine -" - -for di in ${DOCKER_IMAGES} -do -(retry 2 docker pull "${di}") || echo "Error pulling ${di} Docker image, we continue" -done - diff --git a/.ci/run-elasticsearch.sh b/.ci/run-elasticsearch.sh deleted file mode 100755 index f9d13906a..000000000 --- a/.ci/run-elasticsearch.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env bash -# -# Launch one or more Elasticsearch nodes via the Docker image, -# to form a cluster suitable for running the REST API tests. -# -# Export the STACK_VERSION variable, eg. '8.0.0-SNAPSHOT'. -# Export the TEST_SUITE variable, eg. 'oss' or 'xpack' defaults to 'oss'. 
-# Export the NUMBER_OF_NODES variable to start more than 1 node - -# Version 1.1.0 -# - Initial version of the run-elasticsearch.sh script -# - Deleting the volume should not dependent on the container still running -# - Fixed `ES_JAVA_OPTS` config -# - Moved to STACK_VERSION and TEST_VERSION -# - Refactored into functions and imports -# - Support NUMBER_OF_NODES - -script_path=$(dirname $(realpath -s $0)) -source $script_path/functions/imports.sh -set -euo pipefail - -echo -e "\033[34;1mINFO:\033[0m Take down node if called twice with the same arguments (DETACH=true) or on seperate terminals \033[0m" -cleanup_node $es_node_name - -master_node_name=${es_node_name} -cluster_name=${moniker}${suffix} - -declare -a volumes -environment=($(cat <<-END - --env node.name=$es_node_name - --env cluster.name=$cluster_name - --env cluster.initial_master_nodes=$master_node_name - --env discovery.seed_hosts=$master_node_name - --env cluster.routing.allocation.disk.threshold_enabled=false - --env bootstrap.memory_lock=true - --env node.attr.testattr=test - --env path.repo=/tmp - --env repositories.url.allowed_urls=http://snapshot.test* -END -)) -if [[ "$TEST_SUITE" == "xpack" ]]; then - environment+=($(cat <<-END - --env ELASTIC_PASSWORD=$elastic_password - --env xpack.license.self_generated.type=trial - --env xpack.security.enabled=true - --env xpack.security.http.ssl.enabled=true - --env xpack.security.http.ssl.verification_mode=certificate - --env xpack.security.http.ssl.key=certs/testnode.key - --env xpack.security.http.ssl.certificate=certs/testnode.crt - --env xpack.security.http.ssl.certificate_authorities=certs/ca.crt - --env xpack.security.transport.ssl.enabled=true - --env xpack.security.transport.ssl.key=certs/testnode.key - --env xpack.security.transport.ssl.certificate=certs/testnode.crt - --env xpack.security.transport.ssl.certificate_authorities=certs/ca.crt -END -)) - volumes+=($(cat <<-END - --volume $ssl_cert:/usr/share/elasticsearch/config/certs/testnode.crt - --volume $ssl_key:/usr/share/elasticsearch/config/certs/testnode.key - --volume $ssl_ca:/usr/share/elasticsearch/config/certs/ca.crt -END -)) -fi - -cert_validation_flags="" -if [[ "$TEST_SUITE" == "xpack" ]]; then - cert_validation_flags="--insecure --cacert /usr/share/elasticsearch/config/certs/ca.crt --resolve ${es_node_name}:443:127.0.0.1" -fi - -NUMBER_OF_NODES=${NUMBER_OF_NODES-1} -http_port=9200 -for (( i=0; i<$NUMBER_OF_NODES; i++, http_port++ )); do - node_name=${es_node_name}$i - node_url=${external_elasticsearch_url/9200/${http_port}}$i - if [[ "$i" == "0" ]]; then node_name=$es_node_name; fi - environment+=($(cat <<-END - --env node.name=$node_name -END -)) - echo "$i: $http_port $node_url " - volume_name=${node_name}-${suffix}-data - volumes+=($(cat <<-END - --volume $volume_name:/usr/share/elasticsearch/data${i} -END -)) - - # make sure we detach for all but the last node if DETACH=false (default) so all nodes are started - local_detach="true" - if [[ "$i" == "$((NUMBER_OF_NODES-1))" ]]; then local_detach=$DETACH; fi - echo -e "\033[34;1mINFO:\033[0m Starting container $node_name \033[0m" - set -x - docker run \ - --name "$node_name" \ - --network "$network_name" \ - --env "ES_JAVA_OPTS=-Xms1g -Xmx1g" \ - "${environment[@]}" \ - "${volumes[@]}" \ - --publish "$http_port":9200 \ - --ulimit nofile=65536:65536 \ - --ulimit memlock=-1:-1 \ - --detach="$local_detach" \ - --health-cmd="curl $cert_validation_flags --fail $elasticsearch_url/_cluster/health || exit 1" \ - --health-interval=2s \ - --health-retries=20 \ - 
--health-timeout=2s \ - --rm \ - docker.elastic.co/elasticsearch/"$elasticsearch_container"; - - set +x - if wait_for_container "$es_node_name" "$network_name"; then - echo -e "\033[32;1mSUCCESS:\033[0m Running on: $node_url\033[0m" - fi - -done - diff --git a/.ci/run-repository.sh b/.ci/run-repository.sh deleted file mode 100755 index 64c5b413e..000000000 --- a/.ci/run-repository.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash -# parameters are available to this script - -# STACK_VERSION -- version e.g Major.Minor.Patch(-Prelease) -# TEST_SUITE -- which test suite to run: oss or xpack -# ELASTICSEARCH_URL -- The url at which elasticsearch is reachable, a default is composed based on STACK_VERSION and TEST_SUITE -# NODE_JS_VERSION -- node js version (defined in test-matrix.yml, a default is hardcoded here) -script_path=$(dirname $(realpath -s $0)) -source $script_path/functions/imports.sh -set -euo pipefail - -NODE_JS_VERSION=${NODE_JS_VERSION-12} -ELASTICSEARCH_URL=${ELASTICSEARCH_URL-"$elasticsearch_url"} -elasticsearch_container=${elasticsearch_container-} - -echo -e "\033[34;1mINFO:\033[0m VERSION ${STACK_VERSION}\033[0m" -echo -e "\033[34;1mINFO:\033[0m TEST_SUITE ${TEST_SUITE}\033[0m" -echo -e "\033[34;1mINFO:\033[0m URL ${ELASTICSEARCH_URL}\033[0m" -echo -e "\033[34;1mINFO:\033[0m CONTAINER ${elasticsearch_container}\033[0m" -echo -e "\033[34;1mINFO:\033[0m NODE_JS_VERSION ${NODE_JS_VERSION}\033[0m" - -echo -e "\033[1m>>>>> Build docker container >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" - -docker build \ - --file .ci/Dockerfile \ - --tag elastic/elasticsearch-js \ - --build-arg NODE_JS_VERSION=${NODE_JS_VERSION} \ - . - -echo -e "\033[1m>>>>> NPM run test:integration >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" - -repo=$(realpath $(dirname $(realpath -s $0))/../) -run_script_args="" -if [[ "$NODE_JS_VERSION" == "8" ]]; then - run_script_args="--harmony-async-iteration" -fi - -docker run \ - --network=${network_name} \ - --env "TEST_ES_SERVER=${ELASTICSEARCH_URL}" \ - --volume $repo:/usr/src/app \ - --volume /usr/src/app/node_modules \ - --name elasticsearch-js \ - --rm \ - elastic/elasticsearch-js \ - node ${run_script_args} test/integration/index.js diff --git a/.ci/run-tests b/.ci/run-tests deleted file mode 100755 index 76bb055a3..000000000 --- a/.ci/run-tests +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -# -# Version 1.1 -# - Moved to .ci folder and seperated out `run-repository.sh` -# - Add `$RUNSCRIPTS` env var for running Elasticsearch dependent products -script_path=$(dirname $(realpath -s $0)) -source $script_path/functions/imports.sh -set -euo pipefail - -echo -e "\033[1m>>>>> Start [$STACK_VERSION container] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" -DETACH=true bash .ci/run-elasticsearch.sh - -if [[ -n "$RUNSCRIPTS" ]]; then - for RUNSCRIPT in ${RUNSCRIPTS//,/ } ; do - echo -e "\033[1m>>>>> Running run-$RUNSCRIPT.sh >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" - CONTAINER_NAME=${RUNSCRIPT} \ - DETACH=true \ - bash .ci/run-${RUNSCRIPT}.sh - done -fi - -echo -e "\033[1m>>>>> Repository specific tests >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" -bash .ci/run-repository.sh diff --git a/.ci/test-matrix.yml b/.ci/test-matrix.yml deleted file mode 100644 index 1c6423ab7..000000000 --- a/.ci/test-matrix.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -STACK_VERSION: - - 8.0.0-SNAPSHOT - -NODE_JS_VERSION: - - 14 - - 12 - - 10 - - 8 - -TEST_SUITE: - - oss - - xpack - -exclude: ~ diff --git a/.dockerignore b/.dockerignore index 54eb2a95a..c2031b20f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,3 
+3,9 @@ npm-debug.log test/benchmarks elasticsearch .git +lib +junit-output +.tap +rest-api-spec +yaml-rest-tests +generated-tests diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..67ba321cc --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,3 @@ +package.json @joshmock +renovate.json @joshmock +catalog-info.yaml @joshmock diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md deleted file mode 100644 index c1dbcdb75..000000000 --- a/.github/ISSUE_TEMPLATE/bug.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -name: 🐛 Bug report -about: Create a report to help us improve ---- - -It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. - -**Please read this entire template before posting any issue. If you ignore these instructions -and post an issue here that does not follow the instructions, your issue might be closed, -locked, and assigned the `not reproducible` label.** - -## 🐛 Bug Report - -A clear and concise description of what the bug is. - -## To Reproduce - -Steps to reproduce the behavior: - -Paste your code here: - -```js - -``` - - - -## Expected behavior - -A clear and concise description of what you expected to happen. - -Paste the results here: - -```js - -``` - -## Your Environment - -- *node version*: 6,8,10 -- `@elastic/elasticsearch` *version*: >=7.0.0 -- *os*: Mac, Windows, Linux -- *any other relevant information* diff --git a/.github/ISSUE_TEMPLATE/bug.yaml b/.github/ISSUE_TEMPLATE/bug.yaml new file mode 100644 index 000000000..46bda9336 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yaml @@ -0,0 +1,66 @@ +--- +name: 🐛 Bug report +description: Create a report to help us improve +labels: ["Category: Bug"] +body: + - type: markdown + attributes: + value: | + It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. + + **Please read this entire template before posting any issue. If you ignore these instructions + and post an issue here that does not follow the instructions, your issue might be closed, + locked, and assigned the `Category: Not an issue` label.** + + - type: textarea + id: bug-report + attributes: + label: 🐛 Bug report + description: A clear and concise description of what the bug is. + validations: + required: true + + - type: textarea + id: reproduction + attributes: + label: To reproduce + description: Steps to reproduce the behavior + validations: + required: true + + - type: textarea + id: expected + attributes: + label: Expected behavior + description: A clear and concise description of what you expected to happen. + validations: + required: true + + - type: input + id: node-js-version + attributes: + label: Node.js version + placeholder: 20.x, 22.x, etc. + validations: + required: true + + - type: input + id: client-version + attributes: + label: "@elastic/elasticsearch version" + placeholder: 7.17.0, 8.14.1, etc. + validations: + required: true + + - type: input + id: os + attributes: + label: Operating system + placeholder: Ubuntu 22.04, macOS, etc. 
+ validations: + required: true + + - type: input + id: env-info + attributes: + label: Any other relevant environment information diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md deleted file mode 100644 index 2335d551d..000000000 --- a/.github/ISSUE_TEMPLATE/feature.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -name: 🚀 Feature Proposal -about: Submit a proposal for a new feature ---- - -It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. - -**Please read this entire template before posting any issue. If you ignore these instructions -and post an issue here that does not follow the instructions, your issue might be closed, -locked, and assigned the `invalid` label.** - -## 🚀 Feature Proposal - -A clear and concise description of what the feature is. - -## Motivation - -Please outline the motivation for the proposal. - -## Example - -Please provide an example for how this feature would be used. diff --git a/.github/ISSUE_TEMPLATE/feature.yaml b/.github/ISSUE_TEMPLATE/feature.yaml new file mode 100644 index 000000000..4a35cf6eb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.yaml @@ -0,0 +1,33 @@ +--- +name: 🚀 Feature Proposal +description: Submit a proposal for a new feature +labels: ["Category: Feature"] +body: + - type: markdown + attributes: + value: | + It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. + + **Please read this entire template before posting any issue. If you ignore these instructions + and post an issue here that does not follow the instructions, your issue might be closed, + locked, and assigned the `Category: Not an issue` label.** + + - type: textarea + id: feature-proposal + attributes: + label: 🚀 Feature Proposal + description: A clear and concise description of what the feature is. + validations: + required: true + + - type: textarea + id: motivation + attributes: + label: Motivation + description: Please outline the motivation for the proposal. + + - type: textarea + id: example + attributes: + label: Example + description: Please provide an example for how this feature would be used. diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md deleted file mode 100644 index bbd7143bc..000000000 --- a/.github/ISSUE_TEMPLATE/question.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -name: 💬 Questions / Help -about: If you have questions, please check our Gitter or Help repo ---- - -## 💬 Questions and Help - -### Please note that this issue tracker is not a help forum and this issue may be closed. - -It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. 
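The Markdown templates above are replaced by GitHub issue forms, which collect environment details as structured, mostly required fields. A reporter can gather those answers with a few shell commands; the snippet below is a sketch (the `npm ls` pipe is the same one the regression form suggests):

```bash
# Sketch: collecting the fields the new issue forms ask for.
node --version                           # "Node.js version"
npm ls -a 2>/dev/null | grep '@elastic'  # "@elastic/elasticsearch version" and "@elastic/transport"
uname -srm                               # "Operating system"
```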
diff --git a/.github/ISSUE_TEMPLATE/question.yaml b/.github/ISSUE_TEMPLATE/question.yaml new file mode 100644 index 000000000..083cb7f2d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.yaml @@ -0,0 +1,21 @@ +--- +name: 💬 Questions / Help +description: If you have questions, please check our community forum or support +labels: ["Category: Question"] +body: + - type: markdown + attributes: + value: | + ### Please note that this issue tracker is not a help forum and this issue may be closed. + + Please check our [community forum](https://discuss.elastic.co/) or [contact Elastic support](https://www.elastic.co/support) if your issue is not specifically related to the documented functionality of this client library. + + It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. + + - type: textarea + id: question + attributes: + label: Question + description: Your question or comment + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/regression.md b/.github/ISSUE_TEMPLATE/regression.md deleted file mode 100644 index b3c6c2606..000000000 --- a/.github/ISSUE_TEMPLATE/regression.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -name: 💥 Regression Report -about: Report unexpected behavior that worked in previous versions ---- - -It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. - -**Please read this entire template before posting any issue. If you ignore these instructions -and post an issue here that does not follow the instructions, your issue might be closed, -locked, and assigned the `invalid` label.** - -## 💥 Regression Report - -A clear and concise description of what the regression is. - -## Last working version - -Worked up to version: - -Stopped working in version: - -## To Reproduce - -Steps to reproduce the behavior: - -Paste your code here: - -```js - -``` - - - -## Expected behavior - -A clear and concise description of what you expected to happen. - -Paste the results here: - -```js - -``` - -## Your Environment - -- *node version*: 6,8,10 -- `@elastic/elasticsearch` *version*: >=7.0.0 -- *os*: Mac, Windows, Linux -- *any other relevant information* diff --git a/.github/ISSUE_TEMPLATE/regression.yaml b/.github/ISSUE_TEMPLATE/regression.yaml new file mode 100644 index 000000000..5271be332 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/regression.yaml @@ -0,0 +1,92 @@ +--- +name: 💥 Regression Report +description: Report unexpected behavior that worked in previous versions +labels: ["Category: Bug"] +body: + - type: markdown + attributes: + value: | + It's not uncommon that somebody already opened an issue or in the best case it's already fixed but not merged. That's the reason why you should [search](https://github.com/elastic/elasticsearch-js/issues) at first before submitting a new one. + + **Please read this entire template before posting any issue. If you ignore these instructions + and post an issue here that does not follow the instructions, your issue might be closed, + locked, and assigned the `Category: Not an issue` label.** + + - type: textarea + id: report + attributes: + label: Regression report + description: A clear and concise description of what the regression is. 
+ validations: + required: true + + - type: input + id: last-working-version + attributes: + label: Last working version + description: Version of `@elastic/elasticsearch` where this last worked. + validations: + required: true + + - type: textarea + id: to-reproduce + attributes: + label: To reproduce + description: | + Paste your code here that shows how to reproduce the behavior. + + In some cases, it might be challenging to reproduce the bug in a few lines of code. + You can fork the following repository, which contains all the configuration needed to spin up a three nodes Elasticsearch cluster with security enabled. + [This repository](https://github.com/delvedor/es-reproduce-issue) also contains a preconfigured client instance that you can use to reproduce the issue. + validations: + required: true + + - type: textarea + id: expected-behavior + attributes: + label: Expected behavior + description: A clear and concise description of what you expected to happen. + validations: + required: true + + - type: input + id: node-version + attributes: + label: Node.js version + description: What version of Node.js you are using (`node --version`). + validations: + required: true + + - type: input + id: typescript-version + attributes: + label: TypeScript version + description: TypeScript version you are using, if applicable. + + - type: input + id: elasticsearch-client-version + attributes: + label: Elasticsearch client version + description: What version of `@elastic/elasticsearch` and `@elastic/transport` you are using (`npm ls -a | grep '@elastic'`). + validations: + required: true + + - type: input + id: elasticsearch-version + attributes: + label: Elasticsearch server version + description: What version of Elasticsearch you are using. + validations: + required: true + + - type: input + id: operating-system + attributes: + label: Operating system + description: What operating system you are running. + placeholder: e.g. Linux, MacOS, Windows + + - type: textarea + id: env-info + attributes: + label: Any other relevant environment information. diff --git a/.github/ISSUE_TEMPLATE/security.md b/.github/ISSUE_TEMPLATE/security.md deleted file mode 100644 index 0529296fc..000000000 --- a/.github/ISSUE_TEMPLATE/security.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -name: 👮 Security Issue -about: Responsible Disclosure ---- - -If you want to report a security issue, please take a look at [elastic/security](https://www.elastic.co/community/security). diff --git a/.github/ISSUE_TEMPLATE/security.yaml b/.github/ISSUE_TEMPLATE/security.yaml new file mode 100644 index 000000000..e003a1e6b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/security.yaml @@ -0,0 +1,8 @@ +--- +name: 👮 Security Issue +description: Responsible disclosure +body: + - type: markdown + attributes: + value: | + If you want to report a security issue, please take a look at [elastic/security](https://www.elastic.co/community/security). 
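The new `.github/make.sh` below becomes the single entry point for release and codegen chores: it builds the `Dockerfile-make` image and dispatches to `.buildkite/make.mjs` inside the container. Based on the targets documented in its own header, typical invocations would look like this sketch (version numbers are illustrative):

```bash
# Sketch: invoking the build entry script (targets from its header).
./.github/make.sh assemble 9.1.0   # build client artifacts for a version
./.github/make.sh bump 9.1.0       # bump client internals to a version
./.github/make.sh codegen main     # regenerate API endpoints from the spec
./.github/make.sh clean            # clean the .buildkite/output workspace
```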
diff --git a/.github/make.sh b/.github/make.sh new file mode 100755 index 000000000..30cb63e70 --- /dev/null +++ b/.github/make.sh @@ -0,0 +1,222 @@ +#!/usr/bin/env bash +# ------------------------------------------------------- # +# +# Build entry script for elasticsearch-js +# +# Must be called: ./.github/make.sh +# +# Version: 1.1.0 +# +# Targets: +# --------------------------- +# assemble : build client artifacts with version +# bump : bump client internals to version +# bumpmatrix : bump stack version in test matrix to version +# codegen : generate endpoints +# docsgen : generate documentation +# examplegen : generate the doc examples +# clean : clean workspace +# +# ------------------------------------------------------- # + +# ------------------------------------------------------- # +# Bootstrap +# ------------------------------------------------------- # +script_path=$(dirname "$(realpath -s "$0")") +repo=$(realpath "$script_path/../") + +# shellcheck disable=SC1090 +CMD=$1 +TASK=$1 +TASK_ARGS=() +VERSION=$2 +STACK_VERSION=$VERSION +set -euo pipefail + +product="elastic/elasticsearch-js" +output_folder=".buildkite/output" +codegen_folder=".buildkite/output" +OUTPUT_DIR="$repo/${output_folder}" +NODE_VERSION=22 +WORKFLOW=${WORKFLOW-staging} +mkdir -p "$OUTPUT_DIR" + +echo -e "\033[34;1mINFO:\033[0m PRODUCT ${product}\033[0m" +echo -e "\033[34;1mINFO:\033[0m VERSION ${STACK_VERSION}\033[0m" +echo -e "\033[34;1mINFO:\033[0m OUTPUT_DIR ${OUTPUT_DIR}\033[0m" + +case $CMD in +clean) + echo -e "\033[36;1mTARGET: clean workspace $output_folder\033[0m" + rm -rf "$output_folder" + echo -e "\033[32;1mdone.\033[0m" + exit 0 + ;; +assemble) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: assemble -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: assemble artifact $VERSION\033[0m" + TASK=release + TASK_ARGS=("$VERSION" "$output_folder") + ;; +codegen) + if [ -v "$VERSION" ] || [[ -z "$VERSION" ]]; then + # fall back to branch name or `main` if no VERSION is set + branch_name=$(git rev-parse --abbrev-ref HEAD) + if [[ "$branch_name" =~ ^[0-9]+\.([0-9]+|x) ]]; then + echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using branch name: \`$branch_name\`\033[0m" + VERSION="$branch_name" + else + echo -e "\033[36;1mTARGET: codegen -> No VERSION argument found, using \`main\`\033[0m" + VERSION="main" + fi + fi + if [ "$VERSION" = 'main' ]; then + echo -e "\033[36;1mTARGET: codegen API $VERSION\033[0m" + else + echo -e "\033[36;1mTARGET: codegen API v$VERSION\033[0m" + fi + + TASK=codegen + TASK_ARGS=("$VERSION") + ;; +docsgen) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: docsgen -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: generate docs for $VERSION\033[0m" + TASK=codegen + TASK_ARGS=("$VERSION" "$codegen_folder") + ;; +examplesgen) + echo -e "\033[36;1mTARGET: generate examples\033[0m" + TASK=codegen + TASK_ARGS=("$VERSION" "$codegen_folder") + ;; +bump) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: bump -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: bump to version $VERSION\033[0m" + TASK=bump + TASK_ARGS=("$VERSION") + ;; +bumpmatrix) + if [ -v $VERSION ]; then + echo -e "\033[31;1mTARGET: bumpmatrix -> missing version parameter\033[0m" + exit 1 + fi + echo -e "\033[36;1mTARGET: bump stack in test matrix to version $VERSION\033[0m" + TASK=bumpmatrix + TASK_ARGS=("$VERSION") + ;; +*) + echo -e "\n'$CMD' is not supported right now\n" + echo -e "\nUsage:" + echo 
-e "\t $0 release \$VERSION\n" + echo -e "\t $0 bump \$VERSION" + echo -e "\t $0 codegen \$VERSION" + exit 1 + ;; +esac + +# ------------------------------------------------------- # +# Build Container +# ------------------------------------------------------- # + +echo -e "\033[34;1mINFO: building $product container\033[0m" + +docker build \ + --file .buildkite/Dockerfile-make \ + --tag "$product" \ + --build-arg NODE_VERSION="$NODE_VERSION" \ + --build-arg "BUILDER_UID=$(id -u)" \ + --build-arg "BUILDER_GID=$(id -g)" \ + . + +# ------------------------------------------------------- # +# Run the Container +# ------------------------------------------------------- # + +echo -e "\033[34;1mINFO: running $product container\033[0m" + +if [[ -z "${BUILDKITE+x}" ]] && [[ -z "${CI+x}" ]] && [[ -z "${GITHUB_ACTIONS+x}" ]]; then + echo -e "\033[34;1mINFO: Running in local mode" + docker run \ + -u "$(id -u):$(id -g)" \ + --volume "$repo:/usr/src/elasticsearch-js" \ + --volume /usr/src/elasticsearch-js/node_modules \ + --volume "$(realpath "$repo/../elastic-client-generator-js"):/usr/src/elastic-client-generator-js" \ + --env "WORKFLOW=$WORKFLOW" \ + --name make-elasticsearch-js \ + --rm \ + $product \ + /bin/bash -c "mkdir -p /usr/src/elastic-client-generator-js/output && \ + node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}" +else + echo -e "\033[34;1mINFO: Running in CI mode" + + # determine branch to clone + GENERATOR_BRANCH="main" + if [[ "$VERSION" == 8.* ]]; then + GENERATOR_BRANCH="8.x" + fi + echo -e "\033[34;1mINFO: Generator branch: $GENERATOR_BRANCH" + + docker run \ + --volume "$repo:/usr/src/elasticsearch-js" \ + --volume /usr/src/elasticsearch-js/node_modules \ + -u "$(id -u):$(id -g)" \ + --env "WORKFLOW=$WORKFLOW" \ + --name make-elasticsearch-js \ + --rm \ + $product \ + /bin/bash -c "cd /usr/src && \ + git clone --branch $GENERATOR_BRANCH https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \ + mkdir -p /usr/src/elastic-client-generator-js/output && \ + cd /usr/src/elasticsearch-js && \ + node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}" +fi + +# ------------------------------------------------------- # +# Post Command tasks & checks +# ------------------------------------------------------- # + +if [[ "$CMD" == "assemble" ]]; then + if compgen -G ".buildkite/output/*" >/dev/null; then + echo -e "\033[32;1mTARGET: successfully assembled client v$VERSION\033[0m" + else + echo -e "\033[31;1mTARGET: assemble failed, empty workspace!\033[0m" + exit 1 + fi +fi + +if [[ "$CMD" == "bump" ]]; then + if [ -n "$(git status --porcelain)" ]; then + echo -e "\033[32;1mTARGET: successfully bumped client v$VERSION\033[0m" + else + echo -e "\033[31;1mTARGET: failed bumped client v$VERSION\033[0m" + exit 1 + fi +fi + +if [[ "$CMD" == "codegen" ]]; then + if [ -n "$(git status --porcelain)" ]; then + echo -e "\033[32;1mTARGET: successfully generated client v$VERSION\033[0m" + else + echo -e "\033[31;1mTARGET: failed generating client v$VERSION\033[0m" + exit 1 + fi +fi + +if [[ "$CMD" == "docsgen" ]]; then + echo "TODO" +fi + +if [[ "$CMD" == "examplesgen" ]]; then + echo "TODO" +fi diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index e4c52677c..000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,26 +0,0 @@ -# Number of days of inactivity before an issue becomes stale -daysUntilStale: 15 - -# Number of days of inactivity before a stale issue is closed -daysUntilClose: 7 - -# Issues with these labels will never be considered 
stale -exemptLabels: - - "discussion" - - "feature request" - - "bug" - - "todo" - - "good first issue" - -# Label to use when marking an issue as stale -staleLabel: stale - -# Comment to post when marking an issue as stale. Set to `false` to disable -markComment: | - We understand that this might be important for you, but this issue has been automatically marked as stale because it has not had recent activity either from our end or yours. - It will be closed if no further activity occurs, please write a comment if you would like to keep this going. - - Note: in the past months we have built a new client, that has just landed in master. If you want to open an issue or a pr for the legacy client, you should do that in https://github.com/elastic/elasticsearch-js-legacy - -# Comment to post when closing a stale issue. Set to `false` to disable -closeComment: false diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index c0d07fef8..56d4f328a 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -1,16 +1,27 @@ +--- name: Backport on: - pull_request: + pull_request_target: types: - closed - labeled jobs: backport: - runs-on: ubuntu-latest name: Backport + runs-on: ubuntu-latest + # Only react to merged PRs for security reasons. + # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target. + if: > + github.event.pull_request.merged + && ( + github.event.action == 'closed' + || ( + github.event.action == 'labeled' + && contains(github.event.label.name, 'backport') + ) + ) steps: - - name: Backport - uses: tibdex/backport@v1 + - uses: tibdex/backport@9565281eda0731b1d20c4025c43339fb0a23812e # v2.0.4 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml new file mode 100644 index 000000000..adf95da5d --- /dev/null +++ b/.github/workflows/docs-build.yml @@ -0,0 +1,19 @@ +name: docs-build + +on: + push: + branches: + - main + pull_request_target: ~ + merge_group: ~ + +jobs: + docs-preview: + uses: elastic/docs-builder/.github/workflows/preview-build.yml@main + with: + path-pattern: docs/** + permissions: + deployments: write + id-token: write + contents: read + pull-requests: write diff --git a/.github/workflows/docs-cleanup.yml b/.github/workflows/docs-cleanup.yml new file mode 100644 index 000000000..f83e017b5 --- /dev/null +++ b/.github/workflows/docs-cleanup.yml @@ -0,0 +1,14 @@ +name: docs-cleanup + +on: + pull_request_target: + types: + - closed + +jobs: + docs-preview: + uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main + permissions: + contents: none + id-token: write + deployments: write diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index 6e5f9fab0..0efb7f3a6 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -1,156 +1,129 @@ +--- name: Node CI -on: [push, pull_request] +on: + pull_request: {} jobs: - test: - name: Test - runs-on: ${{ matrix.os }} - - strategy: - matrix: - node-version: [10.x, 12.x, 14.x] - os: [ubuntu-latest, windows-latest, macOS-latest] - + paths-filter: + name: Detect files changed + runs-on: ubuntu-latest + outputs: + src-only: "${{ steps.changes.outputs.src-only }}" steps: - - uses: actions/checkout@v2 - - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v1 - with: - node-version: ${{ matrix.node-version }} + - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4 + with: + 
persist-credentials: false
+      - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
+        id: changes
+        with:
+          filters: |
+            src-only:
+              - '!(**/*.{md,asciidoc,txt}|*.{md,asciidoc,txt}|{docs,.buildkite,scripts}/**/*|catalog-info.yaml)'
+              - '.github/workflows/**'
-      - name: Install
-        run: |
-          npm install
-
-      - name: Lint
-        run: |
-          npm run lint
-
-      - name: Unit test
-        run: |
-          npm run test:unit
-
-      - name: Behavior test
-        run: |
-          npm run test:behavior
-
-      - name: Type Definitions
-        run: |
-          npm run test:types
-
-  test-node-v8:
+  test:
     name: Test
     runs-on: ${{ matrix.os }}
+    needs: paths-filter
+    env:
+      CODE_CHANGED: ${{ needs.paths-filter.outputs.src-only }}
     strategy:
+      fail-fast: false
       matrix:
-        node-version: [8.x]
+        node-version: [20.x, 22.x, 24.x]
         os: [ubuntu-latest, windows-latest, macOS-latest]
     steps:
-      - uses: actions/checkout@v2
-
-      - name: Use Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v1
-        with:
-          node-version: ${{ matrix.node-version }}
-
-      - name: Install
-        run: |
-          npm install
-
-      - name: Test
-        run: |
-          npm run test:node8
+      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4
+        with:
+          persist-credentials: false
+
+      - name: Use Node.js ${{ matrix.node-version }}
+        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
+        with:
+          node-version: ${{ matrix.node-version }}
+
+      - name: Install
+        run: |
+          npm install
+
+      - name: Lint
+        shell: bash
+        run: |
+          if [ "$CODE_CHANGED" = "true" ]; then npm run lint; fi
+
+      - name: Unit test
+        shell: bash
+        run: |
+          if [ "$CODE_CHANGED" = "true" ]; then npm run test:unit; fi
+
+      - name: ECMAScript module test
+        shell: bash
+        run: |
+          if [ "$CODE_CHANGED" = "true" ]; then npm run test:esm; fi
-  helpers-integration-test:
-    name: Helpers integration test
+  license:
+    name: License check
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        node-version: [10.x, 12.x, 14.x]
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Configure sysctl limits
-        run: |
-          sudo swapoff -a
-          sudo sysctl -w vm.swappiness=1
-          sudo sysctl -w fs.file-max=262144
-          sudo sysctl -w vm.max_map_count=262144
-
-      - name: Runs Elasticsearch
-        uses: elastic/elastic-github-actions/elasticsearch@master
-        with:
-          stack-version: 8.0.0-SNAPSHOT
-
-      - name: Use Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v1
-        with:
-          node-version: ${{ matrix.node-version }}
-
-      - name: Install
-        run: |
-          npm install
-
-      - name: Integration test
-        run: |
-          npm run test:integration:helpers
-
-  code-coverage:
-    name: Code coverage
-    runs-on: ubuntu-latest
+      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4
+        with:
+          persist-credentials: false
-    strategy:
-      matrix:
-        node-version: [12.x]
-
-    steps:
-      - uses: actions/checkout@v2
+      - name: Use Node.js
+        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
+        with:
+          node-version: 24.x
-      - name: Use Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v1
-        with:
-          node-version: ${{ matrix.node-version }}
+      - name: Install
+        run: |
+          npm install
-      - name: Install
-        run: |
-          npm install
+      - name: License checker
+        run: |
+          npm run license-checker
-      - name: Code coverage
-        run: |
-          npm run test:coverage
+      - name: SPDX header check
+        run: npm run license-header
-      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v1
-        with:
-          file: ./coverage.lcov
-          fail_ci_if_error: true
-
-  license:
-    name: License check
-    runs-on: ubuntu-latest
+  test-bun:
+    name: Test Bun
+    runs-on: ${{ matrix.os }}
+    needs: paths-filter
+    env:
+      CODE_CHANGED: ${{ needs.paths-filter.outputs.src-only }}
     strategy:
+      fail-fast: false
       matrix:
-        node-version: [12.x]
+        os: [ubuntu-latest, windows-latest, macOS-latest]
     steps:
-      - uses: actions/checkout@v2
-
-      - name: Use Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v1
-        with:
-          node-version: ${{ matrix.node-version }}
-
-      - name: Install
-        run: |
-          npm install
-
-      - name: License checker
-        run: |
-          npm run license-checker
+      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4
+        with:
+          persist-credentials: false
+
+      - name: Use Bun
+        uses: oven-sh/setup-bun@735343b667d3e6f658f44d0eca948eb6282f2b76 # v2
+
+      - name: Install
+        run: |
+          bun install
+
+      - name: Lint
+        shell: bash
+        run: |
+          if [ "$CODE_CHANGED" = "true" ]; then bun run lint; fi
+
+      - name: Unit test
+        shell: bash
+        run: |
+          if [ "$CODE_CHANGED" = "true" ]; then bun run test:unit-bun; fi
+
+      - name: ECMAScript module test
+        shell: bash
+        run: |
+          if [ "$CODE_CHANGED" = "true" ]; then bun run test:esm; fi
diff --git a/.github/workflows/npm-publish-unstable.yml b/.github/workflows/npm-publish-unstable.yml
new file mode 100644
index 000000000..662a394a3
--- /dev/null
+++ b/.github/workflows/npm-publish-unstable.yml
@@ -0,0 +1,80 @@
+---
+name: Publish unstable builds to npm
+on:
+  push:
+    branches:
+      - main
+
+# kill in-progress action if another one is triggered
+concurrency:
+  group: publish-unstable
+  cancel-in-progress: true
+
+jobs:
+  # don't publish if source code has not changed
+  paths-filter:
+    name: Detect files changed
+    runs-on: ubuntu-latest
+    outputs:
+      src: "${{ steps.changes.outputs.src }}"
+    steps:
+      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4
+        with:
+          persist-credentials: false
+      - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
+        id: changes
+        with:
+          filters: |
+            src:
+              - 'src/**'
+              - 'package.json'
+              - 'tsconfig.json'
+              - 'index.d.ts'
+              - 'index.js'
+
+  test:
+    name: Run tests and publish unstable
+    needs: paths-filter
+    if: ${{ needs.paths-filter.outputs.src == 'true' }}
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      id-token: write
+    steps:
+      # pause for 30 minutes to avoid publishing more than 2x per hour
+      - name: Debounce 30 minutes
+        uses: zachary95/github-actions-debounce@ab7363483e2837992b8aa6be891763da00ac14f9 # v0.1.0
+        with:
+          wait: 1800
+      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4
+        with:
+          persist-credentials: false
+          ref: main
+      - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
+        with:
+          node-version: "24.x"
+          registry-url: "https://registry.npmjs.org/"
+      - name: Install dependencies
+        run: |
+          npm install -g npm
+          npm install
+      - name: Run tests
+        run: npm test
+      # if tests pass, publish unstable
+      - name: publish unstable build
+        run: |
+          # set unstable version value
+          unstable_tag="unstable.$(date --utc +%Y%m%d%H%M%S)"
+          latest=$(npm view @elastic/elasticsearch --json | jq -r '.["dist-tags"].latest')
+          next=$(npx -y 'semver@^7.7.0' -i minor "$latest")
+          unstable_version="$next-$unstable_tag"
+
+          # overwrite package.json with unstable version value
+          mv package.json package.json.bak
+          jq --arg v "$unstable_version" '.version = $v' package.json.bak > package.json
+          rm package.json.bak
+
+          # publish to npm
+          npm publish --provenance --access public --tag "unstable"
+        env:
+          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml
new file mode 100644
index
000000000..398552980
--- /dev/null
+++ b/.github/workflows/npm-publish.yml
@@ -0,0 +1,71 @@
+name: Publish Package to npm
+on:
+  workflow_dispatch:
+    inputs:
+      branch:
+        description: "Git branch to build and publish"
+        required: true
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      id-token: write
+    steps:
+      - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4
+        with:
+          persist-credentials: false
+          ref: ${{ github.event.inputs.branch }}
+      - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
+        with:
+          node-version: "24.x"
+          registry-url: "https://registry.npmjs.org/"
+      - run: npm install -g npm
+      - run: npm install
+      - run: npm test
+      - name: npm publish
+        run: |
+          version=$(jq -r .version package.json)
+          tag_meta=$(echo "$version" | cut -s -d '-' -f2)
+          # if no meta info on the version (e.g. a '-alpha.1' suffix), publish as a stable release
+          if [[ -z "$tag_meta" ]]; then
+            # get latest version on npm
+            latest=$(npm view @elastic/elasticsearch --json | jq -r '.["dist-tags"].latest')
+
+            # if $version is higher than the most recently published version, publish as-is
+            if [[ $(yes | npx semver "$version" "$latest" | tail -n1) == "$version" ]]; then
+              npm publish --provenance --access public
+            else
+              # otherwise, publish with "previous" tag
+              npm publish --provenance --access public --tag "previous"
+            fi
+          else
+            # publish as a non-stable release using the meta name (e.g. 'alpha') as the tag
+            tag=$(echo "$tag_meta" | cut -d '.' -f1)
+            npm publish --provenance --access public --tag "$tag"
+          fi
+        env:
+          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
+      - name: Publish version on GitHub
+        run: |
+          version=$(jq -r .version package.json)
+          tag_meta=$(echo "$version" | cut -s -d '-' -f2)
+          if [[ -z "$tag_meta" ]]; then
+            gh release create \
+              -n "[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/$BRANCH_NAME/changelog-client.html)" \
+              --target "$BRANCH_NAME" \
+              --title "v$version" \
+              "v$version"
+          else
+            tag_main=$(echo "$version" | cut -d '-' -f1)
+            gh release create \
+              -n "This is a $tag_main pre-release. Changes may not be stable." \
+              --latest=false \
+              --prerelease \
+              --target "$BRANCH_NAME" \
+              --title "v$version" \
+              "v$version"
+          fi
+        env:
+          BRANCH_NAME: ${{ github.event.inputs.branch }}
+          GH_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 000000000..6a851af1b
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,21 @@
+---
+name: "Close stale issues and PRs"
+on:
+  schedule:
+    - cron: "30 1 * * *"
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9
+        with:
+          stale-issue-label: stale
+          stale-pr-label: stale
+          days-before-stale: 90
+          days-before-close: 14
+          exempt-issue-labels: "good first issue,tracking"
+          close-issue-label: closed-stale
+          close-pr-label: closed-stale
+          stale-issue-message: "This issue is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days."
+          stale-pr-message: "This pull request is stale because it has been open 90 days with no activity. Remove the `stale` label, or leave a comment, or this will be closed in 14 days."
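To make the dist-tag selection in `npm-publish.yml` above concrete, here is the same decision path extracted into a standalone shell function. This is only an illustrative sketch, not part of the workflow: the `pick_tag` name and the sample versions are invented for the example, and `sort -V` stands in for the `npx semver` comparison the workflow actually uses.

```bash
#!/usr/bin/env bash
# Sketch of the dist-tag selection in npm-publish.yml (illustrative only).
# pick_tag <package.json version> <latest published version>
pick_tag() {
  local version="$1" latest="$2"
  # everything after the first '-' is pre-release metadata;
  # `cut -s` prints nothing when the version contains no '-'
  local tag_meta
  tag_meta=$(echo "$version" | cut -s -d '-' -f2)
  if [[ -z "$tag_meta" ]]; then
    # stable version: tagged "latest" only if it sorts higher than
    # what is already on npm, otherwise published under "previous"
    if [[ $(printf '%s\n%s\n' "$version" "$latest" | sort -V | tail -n1) == "$version" ]]; then
      echo "latest"
    else
      echo "previous"
    fi
  else
    # pre-release: the meta name becomes the npm tag, e.g. "alpha" from "9.1.0-alpha.1"
    echo "$tag_meta" | cut -d '.' -f1
  fi
}

pick_tag "9.1.0" "9.0.2"          # -> latest
pick_tag "8.19.2" "9.0.2"         # -> previous
pick_tag "9.1.0-alpha.1" "9.0.2"  # -> alpha
```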
diff --git a/.gitignore b/.gitignore index c610926ce..07e49ff7b 100644 --- a/.gitignore +++ b/.gitignore @@ -56,3 +56,19 @@ elasticsearch* test/benchmarks/macro/fixtures/* *-junit.xml + +.cache + +test/bundlers/**/bundle.js +test/bundlers/parcel-test/.parcel-cache + +lib +junit-output +bun.lockb +test-results +processinfo +.tap +rest-api-spec +yaml-rest-tests +generated-tests +schema diff --git a/.npmignore b/.npmignore index becc00414..c890fc1da 100644 --- a/.npmignore +++ b/.npmignore @@ -54,19 +54,35 @@ elasticsearch* # because we should copy them in the main .d.ts file api/generated.d.ts -# Ignore doc folder +# Ignore docs docs +CODE_OF_CONDUCT.md +CONTRIBUTING.md -# Ignore test folder +# Ignore test-related files +codecov.yml test +.tap +rest-api-spec +yaml-rest-tests +generated-tests # Ignore scripts folder scripts # ci configuration -.ci .travis.yml +.buildkite certs .github -CODE_OF_CONDUCT.md -CONTRIBUTING.md +.dockerignore + +# ignore unbuilt source +src + +# Bun artifact +bun.lockb + +# Elastic org artifacts +renovate.json +catalog-info.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..56baaa2b7 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,3 @@ +303 See Other + +Location: https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/changelog-client.html diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 13a6bb39a..2ac7f14dd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ improving the documentation, submitting bug reports and feature requests or writing code. ## Repository structure -The `master` branch is considered unstable, and it's compatible with Elasticsearch master. Unless you are patching an issue, new features should always be sent to the `master` branch, in case of a bugfix, it depends if the bug affects all the release lines.
+The `main` branch is considered unstable, and it's compatible with Elasticsearch main. Unless you are patching an issue, new features should always be sent to the `main` branch; in the case of a bugfix, the target branch depends on whether the bug affects all the release lines.
There is a branch for every supported release line, such as `7.x` or `6.x`. We release bugfixes as soon as possible, while minor and major releases are published at the same time of the Elastic Stack. Usually for every release line there will be a *published* version and a *next* version. Eg: the `7.x` branch contains the version published on npm, and bugfixes should be sent there, while `7.2` *(assuming that 7.1.x is released)* contains the next version, and new features should be sent there. @@ -31,7 +31,7 @@ Once your changes are ready to submit for review: 1. Test your changes Run the test suite to make sure that nothing is broken. - Usually run `npm test` is enough, our CI will take care of running the integration test. If you want to run the integration test yourself, see the *Testing* section below. + Usually running `npm test` is enough; our CI will take care of running the integration tests. If you want to run the integration tests yourself, see [the *Testing* section](#testing) below. 2. Submit a pull request @@ -58,36 +58,50 @@ Once your changes are ready to submit for review: ### Code generation -The entire content of the API folder is generated as well as the `docs/reference.asciidoc` file.
-If you want to run the code generation you should run the following command: -```sh -node scripts/generate --tag -# or -node scripts/generate --branch -``` -Then you should copy the content of `api/generated.d.ts` into the `index.d.ts` file *(automate this step would be a nice pr!)*. +The entire content of the `src/api/` directory is automatically generated from [the Elasticsearch specification](https://github.com/elastic/elasticsearch-specification), as is the `docs/reference.asciidoc` file. +This code generation is done using a separate repository that is not currently available to the public. + +If you find discrepancies between this client's API code and what you see when actually interacting with an Elasticsearch API, you can open a pull request here to fix it. +For API fixes, it's likely a change will need to be made to the specification as well, to ensure your fix is not undone by the code generation process. +We will do our best to make sure this is addressed when reviewing and merging your changes. + +PRs to improve the specification are also welcome! +It is implemented in TypeScript, so JavaScript devs should be able to understand it fairly easily. +Spec fixes are particularly helpful, as they will be reflected in ALL official Elasticsearch clients, not just this one. ### Testing -There are different test scripts, usually during development you only need to run `npm test`, but if you want you can run just a part of the suite, following you will find all the testing scripts and what they do. + +There are a few different test scripts. +Usually during development you only need to run `npm test`, but if you want you can run just a part of the suite: | Script | Description | |---|---| | `npm run test:unit` | Runs the content of the `test/unit` folder. | -| `npm run test:behavior` | Runs the content of the `test/behavior` folder. | -| `npm run test:types` | Runs the content of the `test/types` folder. | -| `npm run test:unit -- --cov --coverage-report=html` | Runs the content of the `test/unit` folder and calculates the code coverage. | -| `npm run test:integration` | Runs the integration test runner.
*Note: it requires a living instance of Elasticsearch.* | -| `npm run lint` | Run the [linter](https://standardjs.com/). | -| `npm run lint:fix` | Fixes the lint errors. | -| `npm test` | Runs lint, unit, behavior, and types test. | +| `npm run test:coverage-100` | Runs unit tests enforcing 100% coverage. | +| `npm run test:coverage-report` | Runs unit tests and generates an `lcov` coverage report. | +| `npm run test:coverage-ui` | Runs unit tests and generates an HTML coverage report. | +| `npm run test:integration` | Runs the integration test runner.
**Note: requires a living instance of Elasticsearch.** |
+| `npm run lint` | Run the [linter](https://github.com/standard/ts-standard). |
+| `npm run lint:fix` | Fixes linter errors. |
+| `npm run license-checker` | Checks that all dependencies have acceptable open source licenses. |
+| `npm test` | Runs `lint` and `test:unit`. |

#### Integration test

-The integration test are generated on the fly by the runner you will find inside `test/integration`, once you execute it, it will clone the Elasticsearch repository and checkout the correct version to grab the [OSS yaml files](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/test) and the [Elastic licensed yaml files](https://github.com/elastic/elasticsearch/tree/master/x-pack/plugin/src/test/resources/rest-api-spec/test) that will be used for generating the test.
-Usually this step is executed by CI since it takes some time, but you can easily run this yourself! Just follow this steps:
-1. Boot an Elasticsearch instance, you can do that by running `./scripts/es-docker.sh` or `./scripts/es-docker-platinum.sh`, the first one will work only with the OSS APIs, while the second will work also with the Elastic licensed APIs;
-1. If you are running the OSS test, you should use `npm run test:integration`, otherwise use `TEST_ES_SERVER=https://elastic:changeme@localhost:9200 npm run test:integration`. You can also pass a `-b` parameter if you want the test to bail out at the first failure: `npm run test:integration -- -b`;
-1. Grab a coffee, it will take some time ;)
+The integration tests are generated on the fly by the runner you will find inside `test/integration`.
+Once you execute it, it will fetch the [YAML REST test files](https://github.com/elastic/elasticsearch/tree/main/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test) from our artifacts API.
+These are used to generate the integration tests.
+
+Usually this step is executed by CI since it takes some time, but you can easily run this yourself!
+Just follow these steps:
+1. Boot a fresh Elasticsearch instance, which can be done in a Docker container by running `STACK_VERSION=8.10.0 DETACH=true .buildkite/run-elasticsearch.sh`, where `STACK_VERSION` and `DETACH` environment variables can be adjusted to your needs. A `TEST_SUITE` env var can also be set to `free` or `platinum`, and defaults to `free`.
+1. Run `npm run test:integration` to run the whole suite, or `npm run test:integration -- --bail` to stop after the first failure.
+1. Grab a coffee, it will take some time. ;)
+
+This suite is very large, and not all tests will pass.
+This is fine.
+This suite is mostly used to identify notable changes in success/fail rate over time as we make changes to the client.

### Releasing
diff --git a/NOTICE.txt b/NOTICE.txt
new file mode 100644
index 000000000..0a7714782
--- /dev/null
+++ b/NOTICE.txt
@@ -0,0 +1,2 @@
+Elasticsearch JavaScript Client
+Copyright 2022-2025 Elasticsearch B.V.
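To make the integration test steps above concrete, a full local session looks like the sketch below; the `STACK_VERSION` value is only an example, and `TEST_SUITE` may be omitted since it defaults to `free`.

```bash
#!/usr/bin/env bash
set -euo pipefail

# 1. Boot a disposable Elasticsearch container in the background
#    (DETACH=true returns control to the shell once the node is up).
STACK_VERSION=8.10.0 DETACH=true TEST_SUITE=free .buildkite/run-elasticsearch.sh

# 2. Run the generated YAML REST tests; --bail stops at the first failure.
npm run test:integration -- --bail
```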
diff --git a/README.md b/README.md index c4c54b789..41948d68b 100644 --- a/README.md +++ b/README.md @@ -2,197 +2,169 @@ # Elasticsearch Node.js client -[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://clients-ci.elastic.co/buildStatus/icon?job=elastic%2Belasticsearch-js%2Bmaster)](https://clients-ci.elastic.co/view/Javascript/job/elastic+elasticsearch-js+master/) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) +[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://badge.buildkite.com/15e4246eb268ea78f6e10aa90bce38c1abb0a4489e79f5a0ac.svg)](https://buildkite.com/elastic/elasticsearch-javascript-client-integration-tests/builds?branch=main) [![Node CI](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml/badge.svg)](https://github.com/elastic/elasticsearch-js/actions/workflows/nodejs.yml) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch) The official Node.js client for Elasticsearch. ---- +## Try Elasticsearch and Kibana locally -**Note:** In the past months we have worked on the new Elasticsearch Node.js client and you can use it by following the instructions below. If you're going to use the legacy one or report an issue, however, please check out [elastic/elasticsearch-js-legacy](https://github.com/elastic/elasticsearch-js-legacy). +If you want to try Elasticsearch and Kibana locally, you can run the following command: ---- +```bash +curl -fsSL https://elastic.co/start-local | sh -## Features -- One-to-one mapping with REST API. -- Generalized, pluggable architecture. -- Configurable, automatic discovery of cluster nodes. -- Persistent, Keep-Alive connections. -- Load balancing across all available nodes. -- Child client support. -- TypeScript support out of the box. - -## Install -``` -npm install @elastic/elasticsearch ``` -### Compatibility +This will run Elasticsearch at http://localhost:9200 and Kibana at http://localhost:5601. -The minimum supported version of Node.js is `v8`. +More information is available [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html). -The library is compatible with all Elasticsearch versions since 5.x, and you should use the same major version of the Elasticsearch instance that you are using. +Alternatively, you can **[download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch)** manually, or +**[sign-up](https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page) for a free trial of Elastic Cloud**. 
-| Elasticsearch Version | Client Version |
-| --------------------- |----------------|
-| `master` | `master` |
-| `7.x` | `7.x` |
-| `6.x` | `6.x` |
-| `5.x` | `5.x` |
+## Installation

-To install a specific major of the client, run the following command:
-```
-npm install @elastic/elasticsearch@
-```
+Refer to the [Installation section](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_installation)
+of the getting started documentation.
+
+## Connecting
+
+Refer to the [Connecting section](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_connecting)
+of the getting started documentation.
+
+## Compatibility
+
+The Elasticsearch client is compatible with currently maintained JS versions.
+
+Language clients are forward compatible, meaning that clients support
+communicating with greater or equal minor versions of Elasticsearch without
+breaking. It does not mean that the client automatically supports new features
+of newer Elasticsearch versions; it is only possible after a release of a new
+client version. For example, an 8.12 client version won't automatically support
+the new features of the 8.13 version of Elasticsearch; the 8.13 client version
+is required for that. Elasticsearch language clients are only backwards
+compatible with default distributions, and no guarantees are made.
+
+| Elasticsearch Version | Elasticsearch-JS Branch |
+| --------------------- | ----------------------- |
+| main | main |
+| 9.x | 9.x |
+| 8.x | 8.x |
+| 7.x | 7.x |
+
+## Usage
+
+- [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_creating_an_index)
+- [Indexing a document](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_indexing_documents)
+- [Getting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_getting_documents)
+- [Searching documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_searching_documents)
+- [Updating documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_updating_documents)
+- [Deleting documents](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_documents)
+- [Deleting an index](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html#_deleting_an_index)
+
+### Node.js support
+
+NOTE: The minimum supported version of Node.js is `v20`.
+
+The client versioning follows the Elastic Stack versioning, which means that
+major, minor, and patch releases are done following a precise schedule that
+often does not coincide with the [Node.js release](https://nodejs.org/en/about/releases/) times.
+
+To avoid supporting insecure and unsupported versions of Node.js, the
+client **will drop support for EOL versions of Node.js between minor releases**.
+Typically, once a Node.js version goes EOL, the client will continue
+to support that version for at least another minor release. If you are using the client
+with a version of Node.js that will be unsupported soon, you will see a warning
+in your logs (the client will start logging the warning with two minors in advance).
+
+Unless you are **always** using a supported version of Node.js,
+we recommend defining the client dependency in your
+`package.json` with `~` instead of `^`. In this way, you lock the
+dependency to the minor release rather than the major
+(for example, `~7.10.0` instead of `^7.10.0`).
+
+| Node.js Version | Node.js EOL date | End of support |
+| --------------- | ---------------- | ------------------- |
+| `8.x` | `December 2019` | `7.11` (early 2021) |
+| `10.x` | `April 2021` | `7.12` (mid 2021) |
+| `12.x` | `April 2022` | `8.2` (early 2022) |
+| `14.x` | `April 2023` | `8.8` (early 2023) |
+| `16.x` | `September 2023` | `8.11` (late 2023) |
+| `18.x` | `April 2025` | `9.1` (mid 2025) |

#### Browser

-WARNING: There is no official support for the browser environment. It exposes your Elasticsearch instance to everyone, which could lead to security issues.
-We recommend that you write a lightweight proxy that uses this client instead.
+> [!WARNING]
+> There is no official support for the browser environment. It exposes your Elasticsearch instance to everyone, which could lead to security issues.
+> We recommend that you write a lightweight proxy that uses this client instead; you can see a proxy example [here](./docs/examples/proxy).

## Documentation

- [Introduction](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html)
-- [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-usage.html)
+- [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#client-usage)
- [Client configuration](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html)
- [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html)
-- [Breaking changes coming from the old client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/breaking-changes.html)
-- [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html)
+- [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html#authentication)
- [Observability](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html)
-- [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child-client.html)
-- [Extend the client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/extend-client.html)
+- [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html)
- [Client helpers](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html)
- [Typescript support](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html)
- [Testing](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html)
- [Examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html)

-## Quick start
-
-First of all, require the client and initialize it:
-```js
-const { Client } = require('@elastic/elasticsearch')
-const client = new Client({ node: 'http://localhost:9200/' })
-```
-
-You can use both the callback-style API and the promise-style API, both behave the same way.
-```js
-// promise API
-const result = await client.search({
-  index: 'my-index',
-  body: { foo: 'bar' }
-})
-
-// callback API
-client.search({
-  index: 'my-index',
-  body: { foo: 'bar' }
-}, (err, result) => {
-  if (err) console.log(err)
-})
-```
-The returned value of **every** API call is formed as follows:
-```ts
-{
-  body: object | boolean
-  statusCode: number
-  headers: object
-  warnings: [string]
-  meta: object
-}
-```
-
-Let's see a complete example!
-```js
-'use strict'
-
-const { Client } = require('@elastic/elasticsearch')
-const client = new Client({ node: 'http://localhost:9200/' })
-
-async function run () {
-  // Let's start by indexing some data
-  await client.index({
-    index: 'game-of-thrones',
-    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
-    body: {
-      character: 'Ned Stark',
-      quote: 'Winter is coming.'
-    }
-  })
-
-  await client.index({
-    index: 'game-of-thrones',
-    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
-    body: {
-      character: 'Daenerys Targaryen',
-      quote: 'I am the blood of the dragon.'
-    }
-  })
-
-  await client.index({
-    index: 'game-of-thrones',
-    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
-    body: {
-      character: 'Tyrion Lannister',
-      quote: 'A mind needs books like a sword needs a whetstone.'
-    }
-  })
-
-  // here we are forcing an index refresh, otherwise we will not
-  // get any result in the consequent search
-  await client.indices.refresh({ index: 'game-of-thrones' })
-
-  // Let's search!
-  const { body } = await client.search({
-    index: 'game-of-thrones',
-    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
-    body: {
-      query: {
-        match: { quote: 'winter' }
-      }
-    }
-  })
-
-  console.log(body.hits.hits)
-}
-
-run().catch(console.log)
-```
-
## Install multiple versions
+
If you are using multiple versions of Elasticsearch, you need to use multiple versions of the client. In the past, installing multiple versions of the same package was not possible, but with `npm v6.9`, you can do that via aliasing.

The command you must run to install different versions of the client is:
+
```sh
npm install @npm:@elastic/elasticsearch@
```

-So for example if you need to install `7.x` and `6.x`, you will run
+So for example if you need to install `7.x` and `6.x`, you will run:
+
```sh
npm install es6@npm:@elastic/elasticsearch@6
npm install es7@npm:@elastic/elasticsearch@7
```

And your `package.json` will look like the following:
+
```json
"dependencies": {
  "es6": "npm:@elastic/elasticsearch@^6.7.0",
  "es7": "npm:@elastic/elasticsearch@^7.0.0"
}
```

You will require the packages in your code by using the aliases you have defined.
+ ```js const { Client: Client6 } = require('es6') const { Client: Client7 } = require('es7') -const client6 = new Client6({ node: 'http://localhost:9200/' }) -const client7 = new Client7({ node: 'http://localhost:9201/' }) +const client6 = new Client6({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const client7 = new Client7({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) -client6.info(console.log) -client7.info(console.log) +client6.info().then(console.log, console.log) +client7.info().then(console.log, console.log) ``` -Finally, if you want to install the client for the next version of Elasticsearch *(the one that lives in Elasticsearch’s master branch)*, you can use the following command: +Finally, if you want to install the client for the next version of Elasticsearch +_(the one that lives in Elasticsearch’s main branch)_, you can use the following +command: + ```sh -npm install esmaster@github:elastic/elasticsearch-js +npm install esmain@github:elastic/elasticsearch-js ``` ## License -This software is licensed under the [Apache 2 license](./LICENSE). +This software is licensed under the [Apache License 2.0](./LICENSE). diff --git a/api/api/async_search.delete.js b/api/api/async_search.delete.js deleted file mode 100644 index 909f3a230..000000000 --- a/api/api/async_search.delete.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildAsyncSearchDelete (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a async_search.delete request - * Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html - */ - return function asyncSearchDelete (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_async_search' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildAsyncSearchDelete diff --git a/api/api/async_search.get.js b/api/api/async_search.get.js deleted file mode 100644 index e512e5ce3..000000000 --- a/api/api/async_search.get.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildAsyncSearchGet (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_completion_timeout', - 'keep_alive', - 'typed_keys' - ] - - const snakeCase = { - waitForCompletionTimeout: 'wait_for_completion_timeout', - keepAlive: 'keep_alive', - typedKeys: 'typed_keys' - } - - /** - * Perform a async_search.get request - * Retrieves the results of a previously submitted async search request given its ID. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html - */ - return function asyncSearchGet (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_async_search' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildAsyncSearchGet diff --git a/api/api/async_search.submit.js b/api/api/async_search.submit.js deleted file mode 100644 index e46a1785f..000000000 --- a/api/api/async_search.submit.js +++ /dev/null @@ -1,147 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildAsyncSearchSubmit (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_completion_timeout', - 'keep_on_completion', - 'keep_alive', - 'batched_reduce_size', - 'request_cache', - 'analyzer', - 'analyze_wildcard', - 'default_operator', - 'df', - 'explain', - 'stored_fields', - 'docvalue_fields', - 'from', - 'ignore_unavailable', - 'ignore_throttled', - 'allow_no_indices', - 'expand_wildcards', - 'lenient', - 'preference', - 'q', - 'routing', - 'search_type', - 'size', - 'sort', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'terminate_after', - 'stats', - 'suggest_field', - 'suggest_mode', - 'suggest_size', - 'suggest_text', - 'timeout', - 'track_scores', - 'track_total_hits', - 'allow_partial_search_results', - 'typed_keys', - 'version', - 'seq_no_primary_term', - 'max_concurrent_shard_requests' - ] - - const snakeCase = { - waitForCompletionTimeout: 'wait_for_completion_timeout', - keepOnCompletion: 'keep_on_completion', - keepAlive: 'keep_alive', - batchedReduceSize: 'batched_reduce_size', - requestCache: 'request_cache', - analyzeWildcard: 'analyze_wildcard', - defaultOperator: 'default_operator', - storedFields: 'stored_fields', - docvalueFields: 'docvalue_fields', - ignoreUnavailable: 'ignore_unavailable', - ignoreThrottled: 'ignore_throttled', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - searchType: 'search_type', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - terminateAfter: 'terminate_after', - suggestField: 'suggest_field', - suggestMode: 'suggest_mode', - suggestSize: 'suggest_size', - suggestText: 'suggest_text', - trackScores: 'track_scores', - trackTotalHits: 'track_total_hits', - allowPartialSearchResults: 'allow_partial_search_results', - typedKeys: 'typed_keys', - seqNoPrimaryTerm: 'seq_no_primary_term', - maxConcurrentShardRequests: 'max_concurrent_shard_requests' - } - - /** - * Perform a async_search.submit request - * Executes a search request asynchronously. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/async-search.html - */ - return function asyncSearchSubmit (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_async_search' - } else { - if (method == null) method = 'POST' - path = '/' + '_async_search' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildAsyncSearchSubmit diff --git a/api/api/autoscaling.delete_autoscaling_policy.js b/api/api/autoscaling.delete_autoscaling_policy.js deleted file mode 100644 index 76d09d1d5..000000000 --- a/api/api/autoscaling.delete_autoscaling_policy.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildAutoscalingDeleteAutoscalingPolicy (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a autoscaling.delete_autoscaling_policy request - * Deletes an autoscaling policy. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-delete-autoscaling-policy.html - */ - return function autoscalingDeleteAutoscalingPolicy (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_autoscaling' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildAutoscalingDeleteAutoscalingPolicy diff --git a/api/api/autoscaling.get_autoscaling_decision.js b/api/api/autoscaling.get_autoscaling_decision.js deleted file mode 100644 index 9bd7388a7..000000000 --- a/api/api/autoscaling.get_autoscaling_decision.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildAutoscalingGetAutoscalingDecision (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a autoscaling.get_autoscaling_decision request - * Gets the current autoscaling decision based on the configured autoscaling policy, indicating whether or not autoscaling is needed. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-decision.html - */ - return function autoscalingGetAutoscalingDecision (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_autoscaling' + '/' + 'decision' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildAutoscalingGetAutoscalingDecision diff --git a/api/api/autoscaling.get_autoscaling_policy.js b/api/api/autoscaling.get_autoscaling_policy.js deleted file mode 100644 index 0a85e303b..000000000 --- a/api/api/autoscaling.get_autoscaling_policy.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildAutoscalingGetAutoscalingPolicy (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a autoscaling.get_autoscaling_policy request - * Retrieves an autoscaling policy. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-get-autoscaling-policy.html - */ - return function autoscalingGetAutoscalingPolicy (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_autoscaling' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildAutoscalingGetAutoscalingPolicy diff --git a/api/api/autoscaling.put_autoscaling_policy.js b/api/api/autoscaling.put_autoscaling_policy.js deleted file mode 100644 index 6a45c7648..000000000 --- a/api/api/autoscaling.put_autoscaling_policy.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildAutoscalingPutAutoscalingPolicy (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a autoscaling.put_autoscaling_policy request - * Creates a new autoscaling policy. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-put-autoscaling-policy.html - */ - return function autoscalingPutAutoscalingPolicy (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_autoscaling' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildAutoscalingPutAutoscalingPolicy diff --git a/api/api/bulk.js b/api/api/bulk.js deleted file mode 100644 index 683c50310..000000000 --- a/api/api/bulk.js +++ /dev/null @@ -1,113 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildBulk (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_active_shards', - 'refresh', - 'routing', - 'timeout', - 'type', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'pipeline', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitForActiveShards: 'wait_for_active_shards', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a bulk request - * Allows to perform multiple index/update/delete operations in a single request. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html - */ - return function bulk (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // check required url components - if (params['type'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (type) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + '_bulk' - } else if ((index) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_bulk' - } else { - if (method == null) method = 'POST' - path = '/' + '_bulk' - } - - // build request object - const request = { - method, - path, - bulkBody: body, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildBulk diff --git a/api/api/cat.aliases.js b/api/api/cat.aliases.js deleted file mode 100644 index fe0675fbb..000000000 --- a/api/api/cat.aliases.js +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatAliases (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'local', - 'h', - 'help', - 's', - 'v', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.aliases request - * Shows information about currently configured aliases to indices including filter and routing infos. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html - */ - return function catAliases (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'aliases' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'aliases' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatAliases diff --git a/api/api/cat.allocation.js b/api/api/cat.allocation.js deleted file mode 100644 index 9bed54887..000000000 --- a/api/api/cat.allocation.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatAllocation (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'bytes', - 'local', - 'master_timeout', - 'h', - 'help', - 's', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.allocation request - * Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html - */ - return function catAllocation (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, nodeId, node_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((node_id || nodeId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'allocation' + '/' + encodeURIComponent(node_id || nodeId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'allocation' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatAllocation diff --git a/api/api/cat.count.js b/api/api/cat.count.js deleted file mode 100644 index 10cf6fd2c..000000000 --- a/api/api/cat.count.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatCount (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'h', - 'help', - 's', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.count request - * Provides quick access to the document count of the entire cluster, or individual indices. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html - */ - return function catCount (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'count' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'count' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatCount diff --git a/api/api/cat.fielddata.js b/api/api/cat.fielddata.js deleted file mode 100644 index e8767eb00..000000000 --- a/api/api/cat.fielddata.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatFielddata (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'bytes', - 'h', - 'help', - 's', - 'v', - 'fields', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.fielddata request - * Shows how much heap memory is currently being used by fielddata on every data node in the cluster. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html - */ - return function catFielddata (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, fields, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((fields) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'fielddata' + '/' + encodeURIComponent(fields) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'fielddata' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatFielddata diff --git a/api/api/cat.health.js b/api/api/cat.health.js deleted file mode 100644 index c655333e7..000000000 --- a/api/api/cat.health.js +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatHealth (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'h', - 'help', - 's', - 'time', - 'ts', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.health request - * Returns a concise representation of the cluster health. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html - */ - return function catHealth (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'health' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatHealth diff --git a/api/api/cat.help.js b/api/api/cat.help.js deleted file mode 100644 index d16f5ca1f..000000000 --- a/api/api/cat.help.js +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatHelp (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'help', - 's', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.help request - * Returns help for the Cat APIs. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html - */ - return function catHelp (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_cat' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatHelp diff --git a/api/api/cat.indices.js b/api/api/cat.indices.js deleted file mode 100644 index 53cabe4a3..000000000 --- a/api/api/cat.indices.js +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatIndices (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'bytes', - 'local', - 'master_timeout', - 'h', - 'health', - 'help', - 'pri', - 's', - 'time', - 'v', - 'include_unloaded_segments', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - includeUnloadedSegments: 'include_unloaded_segments', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.indices request - * Returns information about indices: number of primaries and replicas, document counts, disk size, ... 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html - */ - return function catIndices (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'indices' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'indices' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatIndices diff --git a/api/api/cat.master.js b/api/api/cat.master.js deleted file mode 100644 index 296a82aa5..000000000 --- a/api/api/cat.master.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatMaster (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'local', - 'master_timeout', - 'h', - 'help', - 's', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.master request - * Returns information about the master node. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html - */ - return function catMaster (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'master' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatMaster diff --git a/api/api/cat.ml_data_frame_analytics.js b/api/api/cat.ml_data_frame_analytics.js deleted file mode 100644 index 18b071636..000000000 --- a/api/api/cat.ml_data_frame_analytics.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatMlDataFrameAnalytics (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_match', - 'bytes', - 'format', - 'h', - 'help', - 's', - 'time', - 'v' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - - } - - /** - * Perform a cat.ml_data_frame_analytics request - * Gets configuration and usage information about data frame analytics jobs. - * http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-dfanalytics.html - */ - return function catMlDataFrameAnalytics (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'data_frame' + '/' + 'analytics' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatMlDataFrameAnalytics diff --git a/api/api/cat.ml_datafeeds.js b/api/api/cat.ml_datafeeds.js deleted file mode 100644 index 988d0b094..000000000 --- a/api/api/cat.ml_datafeeds.js +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatMlDatafeeds (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_datafeeds', - 'format', - 'h', - 'help', - 's', - 'time', - 'v' - ] - - const snakeCase = { - allowNoDatafeeds: 'allow_no_datafeeds' - - } - - /** - * Perform a cat.ml_datafeeds request - * Gets configuration and usage information about datafeeds. 
- * http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-datafeeds.html - */ - return function catMlDatafeeds (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((datafeed_id || datafeedId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'datafeeds' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatMlDatafeeds diff --git a/api/api/cat.ml_jobs.js b/api/api/cat.ml_jobs.js deleted file mode 100644 index 2311c796b..000000000 --- a/api/api/cat.ml_jobs.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatMlJobs (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_jobs', - 'bytes', - 'format', - 'h', - 'help', - 's', - 'time', - 'v' - ] - - const snakeCase = { - allowNoJobs: 'allow_no_jobs' - - } - - /** - * Perform a cat.ml_jobs request - * Gets configuration and usage information about anomaly detection jobs. 
- * http://www.elastic.co/guide/en/elasticsearch/reference/current/cat-anomaly-detectors.html - */ - return function catMlJobs (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((job_id || jobId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'anomaly_detectors' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatMlJobs diff --git a/api/api/cat.ml_trained_models.js b/api/api/cat.ml_trained_models.js deleted file mode 100644 index 4249d5f19..000000000 --- a/api/api/cat.ml_trained_models.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatMlTrainedModels (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_match', - 'from', - 'size', - 'bytes', - 'format', - 'h', - 'help', - 's', - 'time', - 'v' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - - } - - /** - * Perform a cat.ml_trained_models request - * Gets configuration and usage information about inference trained models. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-trained-model.html - */ - return function catMlTrainedModels (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((model_id || modelId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'trained_models' + '/' + encodeURIComponent(model_id || modelId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'ml' + '/' + 'trained_models' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatMlTrainedModels diff --git a/api/api/cat.nodeattrs.js b/api/api/cat.nodeattrs.js deleted file mode 100644 index c59442ea5..000000000 --- a/api/api/cat.nodeattrs.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatNodeattrs (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'local', - 'master_timeout', - 'h', - 'help', - 's', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.nodeattrs request - * Returns information about custom node attributes. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html - */ - return function catNodeattrs (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'nodeattrs' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatNodeattrs diff --git a/api/api/cat.nodes.js b/api/api/cat.nodes.js deleted file mode 100644 index f6c45d534..000000000 --- a/api/api/cat.nodes.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatNodes (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'bytes', - 'format', - 'full_id', - 'master_timeout', - 'h', - 'help', - 's', - 'time', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - fullId: 'full_id', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.nodes request - * Returns basic statistics about performance of cluster nodes. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html - */ - return function catNodes (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'nodes' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings
-    return makeRequest(request, options, callback)
-  }
-}
-
-module.exports = buildCatNodes
diff --git a/api/api/cat.pending_tasks.js b/api/api/cat.pending_tasks.js
deleted file mode 100644
index 10f6b5ab0..000000000
--- a/api/api/cat.pending_tasks.js
+++ /dev/null
@@ -1,86 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-function buildCatPendingTasks (opts) {
-  // eslint-disable-next-line no-unused-vars
-  const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts
-
-  const acceptedQuerystring = [
-    'format',
-    'local',
-    'master_timeout',
-    'h',
-    'help',
-    's',
-    'time',
-    'v',
-    'pretty',
-    'human',
-    'error_trace',
-    'source',
-    'filter_path'
-  ]
-
-  const snakeCase = {
-    masterTimeout: 'master_timeout',
-    errorTrace: 'error_trace',
-    filterPath: 'filter_path'
-  }
-
-  /**
-   * Perform a cat.pending_tasks request
-   * Returns a concise representation of the cluster pending tasks.
-   * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html
-   */
-  return function catPendingTasks (params, options, callback) {
-    options = options || {}
-    if (typeof options === 'function') {
-      callback = options
-      options = {}
-    }
-    if (typeof params === 'function' || params == null) {
-      callback = params
-      params = {}
-      options = {}
-    }
-
-    // validate headers object
-    if (options.headers != null && typeof options.headers !== 'object') {
-      const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`)
-      return handleError(err, callback)
-    }
-
-    var warnings = []
-    var { method, body, ...querystring } = params
-    querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings)
-
-    var ignore = options.ignore
-    if (typeof ignore === 'number') {
-      options.ignore = [ignore]
-    }
-
-    var path = ''
-
-    if (method == null) method = 'GET'
-    path = '/' + '_cat' + '/' + 'pending_tasks'
-
-    // build request object
-    const request = {
-      method,
-      path,
-      body: null,
-      querystring
-    }
-
-    options.warnings = warnings.length === 0 ? null : warnings
-    return makeRequest(request, options, callback)
-  }
-}
-
-module.exports = buildCatPendingTasks
diff --git a/api/api/cat.plugins.js b/api/api/cat.plugins.js
deleted file mode 100644
index 709bc13cf..000000000
--- a/api/api/cat.plugins.js
+++ /dev/null
@@ -1,85 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-function buildCatPlugins (opts) {
-  // eslint-disable-next-line no-unused-vars
-  const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts
-
-  const acceptedQuerystring = [
-    'format',
-    'local',
-    'master_timeout',
-    'h',
-    'help',
-    's',
-    'v',
-    'pretty',
-    'human',
-    'error_trace',
-    'source',
-    'filter_path'
-  ]
-
-  const snakeCase = {
-    masterTimeout: 'master_timeout',
-    errorTrace: 'error_trace',
-    filterPath: 'filter_path'
-  }
-
-  /**
-   * Perform a cat.plugins request
-   * Returns information about installed plugins across nodes.
-   * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html
-   */
-  return function catPlugins (params, options, callback) {
-    options = options || {}
-    if (typeof options === 'function') {
-      callback = options
-      options = {}
-    }
-    if (typeof params === 'function' || params == null) {
-      callback = params
-      params = {}
-      options = {}
-    }
-
-    // validate headers object
-    if (options.headers != null && typeof options.headers !== 'object') {
-      const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`)
-      return handleError(err, callback)
-    }
-
-    var warnings = []
-    var { method, body, ...querystring } = params
-    querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings)
-
-    var ignore = options.ignore
-    if (typeof ignore === 'number') {
-      options.ignore = [ignore]
-    }
-
-    var path = ''
-
-    if (method == null) method = 'GET'
-    path = '/' + '_cat' + '/' + 'plugins'
-
-    // build request object
-    const request = {
-      method,
-      path,
-      body: null,
-      querystring
-    }
-
-    options.warnings = warnings.length === 0 ? null : warnings
-    return makeRequest(request, options, callback)
-  }
-}
-
-module.exports = buildCatPlugins
diff --git a/api/api/cat.recovery.js b/api/api/cat.recovery.js
deleted file mode 100644
index 7a75f6433..000000000
--- a/api/api/cat.recovery.js
+++ /dev/null
@@ -1,93 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
-
-'use strict'
-
-/* eslint camelcase: 0 */
-/* eslint no-unused-vars: 0 */
-
-function buildCatRecovery (opts) {
-  // eslint-disable-next-line no-unused-vars
-  const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts
-
-  const acceptedQuerystring = [
-    'format',
-    'active_only',
-    'bytes',
-    'detailed',
-    'h',
-    'help',
-    'index',
-    's',
-    'time',
-    'v',
-    'pretty',
-    'human',
-    'error_trace',
-    'source',
-    'filter_path'
-  ]
-
-  const snakeCase = {
-    activeOnly: 'active_only',
-    errorTrace: 'error_trace',
-    filterPath: 'filter_path'
-  }
-
-  /**
-   * Perform a cat.recovery request
-   * Returns information about index shard recoveries, both on-going and completed.
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html - */ - return function catRecovery (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'recovery' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'recovery' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatRecovery diff --git a/api/api/cat.repositories.js b/api/api/cat.repositories.js deleted file mode 100644 index f00592fcf..000000000 --- a/api/api/cat.repositories.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatRepositories (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'local', - 'master_timeout', - 'h', - 'help', - 's', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.repositories request - * Returns information about snapshot repositories registered in the cluster. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html - */ - return function catRepositories (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'repositories' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatRepositories diff --git a/api/api/cat.segments.js b/api/api/cat.segments.js deleted file mode 100644 index 89d2b5672..000000000 --- a/api/api/cat.segments.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatSegments (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'bytes', - 'h', - 'help', - 's', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.segments request - * Provides low-level information about the segments in the shards of an index. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html - */ - return function catSegments (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'segments' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'segments' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatSegments diff --git a/api/api/cat.shards.js b/api/api/cat.shards.js deleted file mode 100644 index 634d78cd0..000000000 --- a/api/api/cat.shards.js +++ /dev/null @@ -1,92 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatShards (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'bytes', - 'local', - 'master_timeout', - 'h', - 'help', - 's', - 'time', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.shards request - * Provides a detailed view of shard allocation on nodes. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html - */ - return function catShards (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'shards' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'shards' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatShards diff --git a/api/api/cat.snapshots.js b/api/api/cat.snapshots.js deleted file mode 100644 index 78d8921b9..000000000 --- a/api/api/cat.snapshots.js +++ /dev/null @@ -1,92 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatSnapshots (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'ignore_unavailable', - 'master_timeout', - 'h', - 'help', - 's', - 'time', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.snapshots request - * Returns all snapshots in a specific repository. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html - */ - return function catSnapshots (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((repository) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'snapshots' + '/' + encodeURIComponent(repository) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'snapshots' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatSnapshots diff --git a/api/api/cat.tasks.js b/api/api/cat.tasks.js deleted file mode 100644 index 1e0eda879..000000000 --- a/api/api/cat.tasks.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatTasks (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'node_id', - 'actions', - 'detailed', - 'parent_task', - 'h', - 'help', - 's', - 'time', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - nodeId: 'node_id', - parentTask: 'parent_task', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.tasks request - * Returns information about the tasks currently executing on one or more nodes in the cluster. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html - */ - return function catTasks (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'tasks' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatTasks diff --git a/api/api/cat.templates.js b/api/api/cat.templates.js deleted file mode 100644 index adfc17124..000000000 --- a/api/api/cat.templates.js +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatTemplates (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'local', - 'master_timeout', - 'h', - 'help', - 's', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.templates request - * Returns information about existing templates. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html - */ - return function catTemplates (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'templates' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'templates' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatTemplates diff --git a/api/api/cat.thread_pool.js b/api/api/cat.thread_pool.js deleted file mode 100644 index e761b4593..000000000 --- a/api/api/cat.thread_pool.js +++ /dev/null @@ -1,92 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatThreadPool (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format', - 'time', - 'local', - 'master_timeout', - 'h', - 'help', - 's', - 'v', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cat.thread_pool request - * Returns cluster-wide thread pool statistics per node. -By default the active, queue and rejected statistics are returned for all thread pools. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html - */ - return function catThreadPool (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, threadPoolPatterns, thread_pool_patterns, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((thread_pool_patterns || threadPoolPatterns) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'thread_pool' + '/' + encodeURIComponent(thread_pool_patterns || threadPoolPatterns) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'thread_pool' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatThreadPool diff --git a/api/api/cat.transforms.js b/api/api/cat.transforms.js deleted file mode 100644 index 446b4c953..000000000 --- a/api/api/cat.transforms.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCatTransforms (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'from', - 'size', - 'allow_no_match', - 'format', - 'h', - 'help', - 's', - 'time', - 'v' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - - } - - /** - * Perform a cat.transforms request - * Gets configuration and usage information about transforms. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-transforms.html - */ - return function catTransforms (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((transform_id || transformId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'transforms' + '/' + encodeURIComponent(transform_id || transformId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cat' + '/' + 'transforms' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCatTransforms diff --git a/api/api/ccr.delete_auto_follow_pattern.js b/api/api/ccr.delete_auto_follow_pattern.js deleted file mode 100644 index df97e2bbb..000000000 --- a/api/api/ccr.delete_auto_follow_pattern.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrDeleteAutoFollowPattern (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.delete_auto_follow_pattern request - * Deletes auto-follow patterns. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html - */ - return function ccrDeleteAutoFollowPattern (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrDeleteAutoFollowPattern diff --git a/api/api/ccr.follow.js b/api/api/ccr.follow.js deleted file mode 100644 index a193fe80c..000000000 --- a/api/api/ccr.follow.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrFollow (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_active_shards' - ] - - const snakeCase = { - waitForActiveShards: 'wait_for_active_shards' - } - - /** - * Perform a ccr.follow request - * Creates a new follower index configured to follow the referenced leader index. 
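 * A minimal usage sketch, reusing the `client` from the first sketch above; the
 * index names are placeholders and the body fields are assumed from the CCR
 * follow API (`index` and `body` are both required by the checks below):
 *
 *     client.ccr.follow({
 *       index: 'follower-index',
 *       body: { remote_cluster: 'remote-cluster', leader_index: 'leader-index' }
 *     }, (err, result) => {
 *       if (err) throw err
 *     })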
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-follow.html - */ - return function ccrFollow (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'follow' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrFollow diff --git a/api/api/ccr.follow_info.js b/api/api/ccr.follow_info.js deleted file mode 100644 index d0c0c329a..000000000 --- a/api/api/ccr.follow_info.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrFollowInfo (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.follow_info request - * Retrieves information about all follower indices, including parameters and status for each follower index - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-info.html - */ - return function ccrFollowInfo (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'info' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrFollowInfo diff --git a/api/api/ccr.follow_stats.js b/api/api/ccr.follow_stats.js deleted file mode 100644 index 1a66b8645..000000000 --- a/api/api/ccr.follow_stats.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrFollowStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.follow_stats request - * Retrieves follower stats. Returns shard-level stats about the following tasks associated with each shard for the specified indices.
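 * A minimal usage sketch, reusing the `client` from the first sketch above; the
 * follower index name is a placeholder:
 *
 *     client.ccr.followStats({ index: 'follower-index' }, (err, result) => {
 *       if (err) throw err
 *       console.log(result.body)
 *     })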
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-follow-stats.html - */ - return function ccrFollowStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrFollowStats diff --git a/api/api/ccr.forget_follower.js b/api/api/ccr.forget_follower.js deleted file mode 100644 index b340ef9d2..000000000 --- a/api/api/ccr.forget_follower.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrForgetFollower (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.forget_follower request - * Removes the follower retention leases from the leader. 
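 * A minimal usage sketch, reusing the `client` from the first sketch above.
 * This request is addressed to the leader index, and the body fields are
 * assumed from the CCR forget-follower API; all values are placeholders:
 *
 *     client.ccr.forgetFollower({
 *       index: 'leader-index',
 *       body: {
 *         follower_cluster: 'follower-cluster',
 *         follower_index: 'follower-index',
 *         follower_index_uuid: 'follower-index-uuid',
 *         leader_remote_cluster: 'remote-cluster'
 *       }
 *     }, (err, result) => {
 *       if (err) throw err
 *     })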
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-forget-follower.html - */ - return function ccrForgetFollower (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'forget_follower' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrForgetFollower diff --git a/api/api/ccr.get_auto_follow_pattern.js b/api/api/ccr.get_auto_follow_pattern.js deleted file mode 100644 index fbc05aaba..000000000 --- a/api/api/ccr.get_auto_follow_pattern.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrGetAutoFollowPattern (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.get_auto_follow_pattern request - * Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. 
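 * A minimal usage sketch, reusing the `client` from the first sketch above;
 * omit `name` to fetch every configured pattern (the name below is a
 * placeholder):
 *
 *     client.ccr.getAutoFollowPattern({ name: 'my-auto-follow-pattern' }, (err, result) => {
 *       if (err) throw err
 *       console.log(result.body)
 *     })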
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html - */ - return function ccrGetAutoFollowPattern (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_ccr' + '/' + 'auto_follow' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrGetAutoFollowPattern diff --git a/api/api/ccr.pause_auto_follow_pattern.js b/api/api/ccr.pause_auto_follow_pattern.js deleted file mode 100644 index 1d3f2dbad..000000000 --- a/api/api/ccr.pause_auto_follow_pattern.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrPauseAutoFollowPattern (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.pause_auto_follow_pattern request - * Pauses an auto-follow pattern - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html - */ - return function ccrPauseAutoFollowPattern (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) + '/' + 'pause' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrPauseAutoFollowPattern diff --git a/api/api/ccr.pause_follow.js b/api/api/ccr.pause_follow.js deleted file mode 100644 index 9756389e9..000000000 --- a/api/api/ccr.pause_follow.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrPauseFollow (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.pause_follow request - * Pauses a follower index. The follower index will not fetch any additional operations from the leader index. 
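 * A minimal usage sketch, reusing the `client` from the first sketch above; the
 * follower index name is a placeholder:
 *
 *     client.ccr.pauseFollow({ index: 'follower-index' }, (err, result) => {
 *       if (err) throw err
 *     })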
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-pause-follow.html - */ - return function ccrPauseFollow (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'pause_follow' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrPauseFollow diff --git a/api/api/ccr.put_auto_follow_pattern.js b/api/api/ccr.put_auto_follow_pattern.js deleted file mode 100644 index 55e2bde6e..000000000 --- a/api/api/ccr.put_auto_follow_pattern.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrPutAutoFollowPattern (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.put_auto_follow_pattern request - * Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. 
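 * A minimal usage sketch, reusing the `client` from the first sketch above; the
 * body fields are assumed from the CCR auto-follow API and all values are
 * placeholders (`name` and `body` are both required by the checks below):
 *
 *     client.ccr.putAutoFollowPattern({
 *       name: 'my-auto-follow-pattern',
 *       body: { remote_cluster: 'remote-cluster', leader_index_patterns: ['leader-*'] }
 *     }, (err, result) => {
 *       if (err) throw err
 *     })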
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-put-auto-follow-pattern.html - */ - return function ccrPutAutoFollowPattern (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrPutAutoFollowPattern diff --git a/api/api/ccr.resume_auto_follow_pattern.js b/api/api/ccr.resume_auto_follow_pattern.js deleted file mode 100644 index d2139af1e..000000000 --- a/api/api/ccr.resume_auto_follow_pattern.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrResumeAutoFollowPattern (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.resume_auto_follow_pattern request - * Resumes an auto-follow pattern that has been paused - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html - */ - return function ccrResumeAutoFollowPattern (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ccr' + '/' + 'auto_follow' + '/' + encodeURIComponent(name) + '/' + 'resume' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrResumeAutoFollowPattern diff --git a/api/api/ccr.resume_follow.js b/api/api/ccr.resume_follow.js deleted file mode 100644 index 164bdd453..000000000 --- a/api/api/ccr.resume_follow.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrResumeFollow (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.resume_follow request - * Resumes a follower index that has been paused - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html - */ - return function ccrResumeFollow (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'resume_follow' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrResumeFollow diff --git a/api/api/ccr.stats.js b/api/api/ccr.stats.js deleted file mode 100644 index fc067a5cb..000000000 --- a/api/api/ccr.stats.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.stats request - * Gets all stats related to cross-cluster replication. 
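 * A minimal usage sketch, reusing the `client` from the first sketch above.
 * Because the generated function treats a lone function argument as the
 * callback, no params object is needed:
 *
 *     client.ccr.stats((err, result) => {
 *       if (err) throw err
 *       console.log(result.body)
 *     })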
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-stats.html - */ - return function ccrStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_ccr' + '/' + 'stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrStats diff --git a/api/api/ccr.unfollow.js b/api/api/ccr.unfollow.js deleted file mode 100644 index 629d73352..000000000 --- a/api/api/ccr.unfollow.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCcrUnfollow (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ccr.unfollow request - * Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-unfollow.html - */ - return function ccrUnfollow (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ccr' + '/' + 'unfollow' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCcrUnfollow diff --git a/api/api/clear_scroll.js b/api/api/clear_scroll.js deleted file mode 100644 index ba3fef02f..000000000 --- a/api/api/clear_scroll.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClearScroll (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a clear_scroll request - * Explicitly clears the search context for a scroll. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#_clear_scroll_api - */ - return function clearScroll (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, scrollId, scroll_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((scroll_id || scrollId) != null) { - if (method == null) method = 'DELETE' - path = '/' + '_search' + '/' + 'scroll' + '/' + encodeURIComponent(scroll_id || scrollId) - } else { - if (method == null) method = 'DELETE' - path = '/' + '_search' + '/' + 'scroll' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClearScroll diff --git a/api/api/cluster.allocation_explain.js b/api/api/cluster.allocation_explain.js deleted file mode 100644 index c17b40213..000000000 --- a/api/api/cluster.allocation_explain.js +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterAllocationExplain (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'include_yes_decisions', - 'include_disk_info', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - includeYesDecisions: 'include_yes_decisions', - includeDiskInfo: 'include_disk_info', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.allocation_explain request - * Provides explanations for shard allocations in the cluster. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html - */ - return function clusterAllocationExplain (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_cluster' + '/' + 'allocation' + '/' + 'explain' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterAllocationExplain diff --git a/api/api/cluster.delete_component_template.js b/api/api/cluster.delete_component_template.js deleted file mode 100644 index d47afdd27..000000000 --- a/api/api/cluster.delete_component_template.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterDeleteComponentTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.delete_component_template request - * Deletes a component template - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html - */ - return function clusterDeleteComponentTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_component_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterDeleteComponentTemplate diff --git a/api/api/cluster.delete_voting_config_exclusions.js b/api/api/cluster.delete_voting_config_exclusions.js deleted file mode 100644 index fce5bfd28..000000000 --- a/api/api/cluster.delete_voting_config_exclusions.js +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterDeleteVotingConfigExclusions (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_removal', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitForRemoval: 'wait_for_removal', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.delete_voting_config_exclusions request - * Clears cluster voting config exclusions. 
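 * A minimal usage sketch, reusing the `client` from the first sketch above; the
 * camelCase `waitForRemoval` key is rewritten to `wait_for_removal` by the
 * snake-case mapping above:
 *
 *     client.cluster.deleteVotingConfigExclusions({ waitForRemoval: false }, (err, result) => {
 *       if (err) throw err
 *     })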
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html - */ - return function clusterDeleteVotingConfigExclusions (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_cluster' + '/' + 'voting_config_exclusions' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterDeleteVotingConfigExclusions diff --git a/api/api/cluster.exists_component_template.js b/api/api/cluster.exists_component_template.js deleted file mode 100644 index cfdb42494..000000000 --- a/api/api/cluster.exists_component_template.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterExistsComponentTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.exists_component_template request - * Returns information about whether a particular component template exists - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html - */ - return function clusterExistsComponentTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method =
'HEAD' - path = '/' + '_component_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterExistsComponentTemplate diff --git a/api/api/cluster.get_component_template.js b/api/api/cluster.get_component_template.js deleted file mode 100644 index 8a6d3030f..000000000 --- a/api/api/cluster.get_component_template.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterGetComponentTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.get_component_template request - * Returns one or more component templates - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html - */ - return function clusterGetComponentTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_component_template' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_component_template' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterGetComponentTemplate diff --git a/api/api/cluster.get_settings.js b/api/api/cluster.get_settings.js deleted file mode 100644 index 3ba0d4d9f..000000000 --- a/api/api/cluster.get_settings.js +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterGetSettings (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'flat_settings', - 'master_timeout', - 'timeout', - 'include_defaults', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - flatSettings: 'flat_settings', - masterTimeout: 'master_timeout', - includeDefaults: 'include_defaults', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.get_settings request - * Returns cluster settings. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html - */ - return function clusterGetSettings (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'settings' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterGetSettings diff --git a/api/api/cluster.health.js b/api/api/cluster.health.js deleted file mode 100644 index 077dd1a89..000000000 --- a/api/api/cluster.health.js +++ /dev/null @@ -1,101 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterHealth (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'expand_wildcards', - 'level', - 'local', - 'master_timeout', - 'timeout', - 'wait_for_active_shards', - 'wait_for_nodes', - 'wait_for_events', - 'wait_for_no_relocating_shards', - 'wait_for_no_initializing_shards', - 'wait_for_status', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - expandWildcards: 'expand_wildcards', - masterTimeout: 'master_timeout', - waitForActiveShards: 'wait_for_active_shards', - waitForNodes: 'wait_for_nodes', - waitForEvents: 'wait_for_events', - waitForNoRelocatingShards: 'wait_for_no_relocating_shards', - waitForNoInitializingShards: 'wait_for_no_initializing_shards', - waitForStatus: 'wait_for_status', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.health request - * Returns basic information about the health of the cluster. 
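 * A minimal usage sketch, reusing the `client` from the first sketch above; the
 * `waitForStatus` key is rewritten to `wait_for_status` by the snake-case
 * mapping above:
 *
 *     client.cluster.health({ waitForStatus: 'yellow', level: 'indices' }, (err, result) => {
 *       if (err) throw err
 *       console.log(result.body.status)
 *     })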
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html - */ - return function clusterHealth (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'health' + '/' + encodeURIComponent(index) - } else { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'health' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterHealth diff --git a/api/api/cluster.pending_tasks.js b/api/api/cluster.pending_tasks.js deleted file mode 100644 index bde8cd9d8..000000000 --- a/api/api/cluster.pending_tasks.js +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterPendingTasks (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'local', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.pending_tasks request - * Returns a list of any cluster-level changes (e.g. create index, update mapping, -allocate or fail shard) which have not yet been executed. 
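 * A minimal usage sketch, reusing the `client` from the first sketch above:
 *
 *     client.cluster.pendingTasks((err, result) => {
 *       if (err) throw err
 *       console.log(result.body.tasks)
 *     })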
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html - */ - return function clusterPendingTasks (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'pending_tasks' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterPendingTasks diff --git a/api/api/cluster.post_voting_config_exclusions.js b/api/api/cluster.post_voting_config_exclusions.js deleted file mode 100644 index 14cd90379..000000000 --- a/api/api/cluster.post_voting_config_exclusions.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterPostVotingConfigExclusions (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'node_ids', - 'node_names', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - nodeIds: 'node_ids', - nodeNames: 'node_names', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.post_voting_config_exclusions request - * Updates the cluster voting config exclusions by node ids or node names. 
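 * A minimal usage sketch, reusing the `client` from the first sketch above; the
 * node names are placeholders, and `nodeNames` is rewritten to `node_names` by
 * the snake-case mapping above:
 *
 *     client.cluster.postVotingConfigExclusions({ nodeNames: 'node-1,node-2' }, (err, result) => {
 *       if (err) throw err
 *     })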
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html - */ - return function clusterPostVotingConfigExclusions (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_cluster' + '/' + 'voting_config_exclusions' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterPostVotingConfigExclusions diff --git a/api/api/cluster.put_component_template.js b/api/api/cluster.put_component_template.js deleted file mode 100644 index 6dbcacf9f..000000000 --- a/api/api/cluster.put_component_template.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterPutComponentTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'create', - 'timeout', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.put_component_template request - * Creates or updates a component template - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html - */ - return function clusterPutComponentTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore 
=== 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_component_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterPutComponentTemplate diff --git a/api/api/cluster.put_settings.js b/api/api/cluster.put_settings.js deleted file mode 100644 index d49a6261a..000000000 --- a/api/api/cluster.put_settings.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterPutSettings (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'flat_settings', - 'master_timeout', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - flatSettings: 'flat_settings', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.put_settings request - * Updates the cluster settings. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html - */ - return function clusterPutSettings (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_cluster' + '/' + 'settings' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterPutSettings diff --git a/api/api/cluster.remote_info.js b/api/api/cluster.remote_info.js deleted file mode 100644 index a18e6d6dc..000000000 --- a/api/api/cluster.remote_info.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
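The builders deleted above were never called directly; the generated layer bound them onto the client namespaces, so `cluster.put_settings` surfaced as `client.cluster.putSettings` and accepted either a trailing callback or, when the callback was omitted, returned a Promise. A minimal usage sketch against the 7.x client (the endpoint URL and settings payload are illustrative, not part of this diff):

'use strict'

const { Client } = require('@elastic/elasticsearch')

// Illustrative endpoint; any reachable cluster works here.
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Promise style: no callback argument, so the builder returns a Promise.
  const { body } = await client.cluster.putSettings({
    body: { transient: { 'cluster.routing.allocation.enable': 'all' } }
  })
  console.log(body)

  // Callback style: the same builder detects the trailing function.
  client.cluster.putSettings({ body: { persistent: {} } }, (err, result) => {
    if (err) console.log(err)
  })
}

run().catch(console.log)

Later sketches in this section reuse this `client` and an async context rather than repeating the setup.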
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterRemoteInfo (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.remote_info request - * Returns the information about configured remote clusters. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html - */ - return function clusterRemoteInfo (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_remote' + '/' + 'info' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterRemoteInfo diff --git a/api/api/cluster.reroute.js b/api/api/cluster.reroute.js deleted file mode 100644 index 6b00af451..000000000 --- a/api/api/cluster.reroute.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterReroute (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'dry_run', - 'explain', - 'retry_failed', - 'metric', - 'master_timeout', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - dryRun: 'dry_run', - retryFailed: 'retry_failed', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.reroute request - * Allows to manually change the allocation of individual shards in the cluster. 
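Note the read-only pattern in `cluster.remote_info`, removed just above: the request object is built with `body: null`, so the transport never serializes a payload, and the params argument is optional because a leading function is treated as the callback. Sketch, reusing the `client` from the first example:

// GET /_remote/info: params omitted, callback passed first
client.cluster.remoteInfo((err, result) => {
  if (err) throw err
  // result.body maps each configured remote-cluster alias to its connection info
  console.log(result.body)
})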
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html - */ - return function clusterReroute (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_cluster' + '/' + 'reroute' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterReroute diff --git a/api/api/cluster.state.js b/api/api/cluster.state.js deleted file mode 100644 index bc54b2a0e..000000000 --- a/api/api/cluster.state.js +++ /dev/null @@ -1,106 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterState (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'local', - 'master_timeout', - 'flat_settings', - 'wait_for_metadata_version', - 'wait_for_timeout', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - flatSettings: 'flat_settings', - waitForMetadataVersion: 'wait_for_metadata_version', - waitForTimeout: 'wait_for_timeout', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.state request - * Returns a comprehensive information about the state of the cluster. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html - */ - return function clusterState (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required url components - if (params['index'] != null && (params['metric'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: metric') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, metric, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((metric) != null && (index) != null) { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'state' + '/' + encodeURIComponent(metric) + '/' + encodeURIComponent(index) - } else if ((metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'state' + '/' + encodeURIComponent(metric) - } else { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'state' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterState diff --git a/api/api/cluster.stats.js b/api/api/cluster.stats.js deleted file mode 100644 index be39dc291..000000000 --- a/api/api/cluster.stats.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildClusterStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'flat_settings', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - flatSettings: 'flat_settings', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a cluster.stats request - * Returns high-level overview of cluster statistics. 
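The `cluster.state` builder deleted above is one of the few with a URL-component dependency: `index` is rejected unless `metric` is also present, because the path template is /_cluster/state/{metric}/{index}. It also shows the camelCase rewriting done by snakeCaseKeys. Sketch inside the async context from the first example (the index name is illustrative):

const state = await client.cluster.state({
  metric: 'metadata',   // required once index is supplied
  index: 'my-index',    // illustrative index name
  flatSettings: true    // rewritten to flat_settings in the querystring
})
console.log(state.body)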
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html - */ - return function clusterStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, nodeId, node_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((node_id || nodeId) != null) { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'stats' + '/' + 'nodes' + '/' + encodeURIComponent(node_id || nodeId) - } else { - if (method == null) method = 'GET' - path = '/' + '_cluster' + '/' + 'stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildClusterStats diff --git a/api/api/count.js b/api/api/count.js deleted file mode 100644 index eada42c7a..000000000 --- a/api/api/count.js +++ /dev/null @@ -1,104 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCount (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'ignore_throttled', - 'allow_no_indices', - 'expand_wildcards', - 'min_score', - 'preference', - 'routing', - 'q', - 'analyzer', - 'analyze_wildcard', - 'default_operator', - 'df', - 'lenient', - 'terminate_after', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - ignoreThrottled: 'ignore_throttled', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - minScore: 'min_score', - analyzeWildcard: 'analyze_wildcard', - defaultOperator: 'default_operator', - terminateAfter: 'terminate_after', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a count request - * Returns number of documents matching a query. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html - */ - return function count (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_count' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_count' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCount diff --git a/api/api/create.js b/api/api/create.js deleted file mode 100644 index d9bedb3d9..000000000 --- a/api/api/create.js +++ /dev/null @@ -1,107 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildCreate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_active_shards', - 'refresh', - 'routing', - 'timeout', - 'version', - 'version_type', - 'pipeline', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitForActiveShards: 'wait_for_active_shards', - versionType: 'version_type', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a create request - * Creates a new document in the index. - -Returns a 409 response when a document with a same ID already exists in the index. 
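As the `count` builder shows, the HTTP verb is derived from the payload: with no body the request is a GET (a Lucene query string can ride in the `q` parameter), while a body switches it to POST. Sketch, reusing `client`:

// GET /my-index/_count: no body, query expressed in the querystring
const viaGet = await client.count({ index: 'my-index', q: 'user:kimchy' })

// POST /my-index/_count: query expressed as a request body
const viaPost = await client.count({
  index: 'my-index',
  body: { query: { match: { user: 'kimchy' } } }
})
console.log(viaGet.body.count, viaPost.body.count)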
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html - */ - return function create (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (type) != null && (id) != null) { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + encodeURIComponent(id) + '/' + '_create' - } else { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_create' + '/' + encodeURIComponent(id) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildCreate diff --git a/api/api/data_frame_transform_deprecated.delete_transform.js b/api/api/data_frame_transform_deprecated.delete_transform.js deleted file mode 100644 index a0de655cd..000000000 --- a/api/api/data_frame_transform_deprecated.delete_transform.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDataFrameTransformDeprecatedDeleteTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'force' - ] - - const snakeCase = { - - } - - /** - * Perform a data_frame_transform_deprecated.delete_transform request - * Deletes an existing transform. 
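The 409 behaviour documented for `create` pairs with the `ignore` normalization every one of these builders repeats (a numeric `options.ignore` is wrapped into an array). Passing `ignore` in the options argument keeps an existing-ID collision from rejecting the Promise. Sketch:

// A second create with the same ID would normally reject with a 409;
// ignore: 409 (normalized to [409]) resolves instead.
const result = await client.create(
  { index: 'my-index', id: '1', body: { user: 'kimchy' } },
  { ignore: 409 }
)
console.log(result.statusCode) // 201 on first run, 409 afterwards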
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html - */ - return function dataFrameTransformDeprecatedDeleteTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_data_frame' + '/' + 'transforms' + '/' + encodeURIComponent(transform_id || transformId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDataFrameTransformDeprecatedDeleteTransform diff --git a/api/api/data_frame_transform_deprecated.get_transform.js b/api/api/data_frame_transform_deprecated.get_transform.js deleted file mode 100644 index d1ace6cdd..000000000 --- a/api/api/data_frame_transform_deprecated.get_transform.js +++ /dev/null @@ -1,74 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDataFrameTransformDeprecatedGetTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'from', - 'size', - 'allow_no_match' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - } - - /** - * Perform a data_frame_transform_deprecated.get_transform request - * Retrieves configuration information for transforms. 
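The deprecated `_data_frame` builders all accept their identifier under either casing, and the destructuring `var { method, body, transformId, transform_id, ...querystring }` keeps both spellings out of the querystring. Assuming the generated facade exposed the namespace as `dataFrameTransformDeprecated` (camelCased from the API name), either call below reaches DELETE /_data_frame/transforms/my-transform:

// Both spellings are equivalent; 'my-transform' is an illustrative id.
await client.dataFrameTransformDeprecated.deleteTransform({ transform_id: 'my-transform' })
await client.dataFrameTransformDeprecated.deleteTransform({ transformId: 'my-transform' })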
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html - */ - return function dataFrameTransformDeprecatedGetTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_data_frame' + '/' + 'transforms' + '/' + encodeURIComponent(transform_id || transformId) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDataFrameTransformDeprecatedGetTransform diff --git a/api/api/data_frame_transform_deprecated.get_transform_stats.js b/api/api/data_frame_transform_deprecated.get_transform_stats.js deleted file mode 100644 index c0eea1fe4..000000000 --- a/api/api/data_frame_transform_deprecated.get_transform_stats.js +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDataFrameTransformDeprecatedGetTransformStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'from', - 'size', - 'allow_no_match' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - } - - /** - * Perform a data_frame_transform_deprecated.get_transform_stats request - * Retrieves usage information for transforms. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html - */ - return function dataFrameTransformDeprecatedGetTransformStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_data_frame' + '/' + 'transforms' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDataFrameTransformDeprecatedGetTransformStats diff --git a/api/api/data_frame_transform_deprecated.preview_transform.js b/api/api/data_frame_transform_deprecated.preview_transform.js deleted file mode 100644 index 3a601c48c..000000000 --- a/api/api/data_frame_transform_deprecated.preview_transform.js +++ /dev/null @@ -1,75 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDataFrameTransformDeprecatedPreviewTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a data_frame_transform_deprecated.preview_transform request - * Previews a transform. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html - */ - return function dataFrameTransformDeprecatedPreviewTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDataFrameTransformDeprecatedPreviewTransform diff --git a/api/api/data_frame_transform_deprecated.put_transform.js b/api/api/data_frame_transform_deprecated.put_transform.js deleted file mode 100644 index 636d656a3..000000000 --- a/api/api/data_frame_transform_deprecated.put_transform.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDataFrameTransformDeprecatedPutTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'defer_validation' - ] - - const snakeCase = { - deferValidation: 'defer_validation' - } - - /** - * Perform a data_frame_transform_deprecated.put_transform request - * Instantiates a transform. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html - */ - return function dataFrameTransformDeprecatedPutTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_data_frame' + '/' + 'transforms' + '/' + encodeURIComponent(transform_id || transformId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDataFrameTransformDeprecatedPutTransform diff --git a/api/api/data_frame_transform_deprecated.start_transform.js b/api/api/data_frame_transform_deprecated.start_transform.js deleted file mode 100644 index eac2c67ad..000000000 --- a/api/api/data_frame_transform_deprecated.start_transform.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDataFrameTransformDeprecatedStartTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout' - ] - - const snakeCase = { - - } - - /** - * Perform a data_frame_transform_deprecated.start_transform request - * Starts one or more transforms. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html - */ - return function dataFrameTransformDeprecatedStartTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_data_frame' + '/' + 'transforms' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDataFrameTransformDeprecatedStartTransform diff --git a/api/api/data_frame_transform_deprecated.stop_transform.js b/api/api/data_frame_transform_deprecated.stop_transform.js deleted file mode 100644 index 48403d42b..000000000 --- a/api/api/data_frame_transform_deprecated.stop_transform.js +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDataFrameTransformDeprecatedStopTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_completion', - 'timeout', - 'allow_no_match' - ] - - const snakeCase = { - waitForCompletion: 'wait_for_completion', - allowNoMatch: 'allow_no_match' - } - - /** - * Perform a data_frame_transform_deprecated.stop_transform request - * Stops one or more transforms. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html - */ - return function dataFrameTransformDeprecatedStopTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_data_frame' + '/' + 'transforms' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDataFrameTransformDeprecatedStopTransform diff --git a/api/api/data_frame_transform_deprecated.update_transform.js b/api/api/data_frame_transform_deprecated.update_transform.js deleted file mode 100644 index 2f4a887fe..000000000 --- a/api/api/data_frame_transform_deprecated.update_transform.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDataFrameTransformDeprecatedUpdateTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'defer_validation' - ] - - const snakeCase = { - deferValidation: 'defer_validation' - } - - /** - * Perform a data_frame_transform_deprecated.update_transform request - * Updates certain properties of a transform. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html - */ - return function dataFrameTransformDeprecatedUpdateTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_data_frame' + '/' + 'transforms' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDataFrameTransformDeprecatedUpdateTransform diff --git a/api/api/delete.js b/api/api/delete.js deleted file mode 100644 index 146ec5d63..000000000 --- a/api/api/delete.js +++ /dev/null @@ -1,104 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDelete (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_active_shards', - 'refresh', - 'routing', - 'timeout', - 'if_seq_no', - 'if_primary_term', - 'version', - 'version_type', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitForActiveShards: 'wait_for_active_shards', - ifSeqNo: 'if_seq_no', - ifPrimaryTerm: 'if_primary_term', - versionType: 'version_type', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a delete request - * Removes a document from the index. 
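The `delete` builder whose removal begins here keeps the legacy typed route alive: when a `type` param is supplied the path becomes /{index}/{type}/{id}, otherwise it falls back to the 7.x default /{index}/_doc/{id}. Sketch, reusing `client`:

// DELETE /my-index/_doc/1: no type given, so the _doc route is used
await client.delete({ index: 'my-index', id: '1' })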
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html - */ - return function _delete (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (type) != null && (id) != null) { - if (method == null) method = 'DELETE' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'DELETE' - path = '/' + encodeURIComponent(index) + '/' + '_doc' + '/' + encodeURIComponent(id) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDelete diff --git a/api/api/delete_by_query.js b/api/api/delete_by_query.js deleted file mode 100644 index c19fe1e68..000000000 --- a/api/api/delete_by_query.js +++ /dev/null @@ -1,139 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDeleteByQuery (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'analyzer', - 'analyze_wildcard', - 'default_operator', - 'df', - 'from', - 'ignore_unavailable', - 'allow_no_indices', - 'conflicts', - 'expand_wildcards', - 'lenient', - 'preference', - 'q', - 'routing', - 'scroll', - 'search_type', - 'search_timeout', - 'max_docs', - 'sort', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'terminate_after', - 'stats', - 'version', - 'request_cache', - 'refresh', - 'timeout', - 'wait_for_active_shards', - 'scroll_size', - 'wait_for_completion', - 'requests_per_second', - 'slices', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - analyzeWildcard: 'analyze_wildcard', - defaultOperator: 'default_operator', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - searchType: 'search_type', - searchTimeout: 'search_timeout', - maxDocs: 'max_docs', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - terminateAfter: 'terminate_after', - requestCache: 'request_cache', - waitForActiveShards: 'wait_for_active_shards', - scrollSize: 'scroll_size', - waitForCompletion: 'wait_for_completion', - requestsPerSecond: 'requests_per_second', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a delete_by_query request - * Deletes documents matching the provided query. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html - */ - return function deleteByQuery (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_delete_by_query' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDeleteByQuery diff --git a/api/api/delete_by_query_rethrottle.js b/api/api/delete_by_query_rethrottle.js deleted file mode 100644 index 066823a68..000000000 --- a/api/api/delete_by_query_rethrottle.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDeleteByQueryRethrottle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'requests_per_second', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - requestsPerSecond: 'requests_per_second', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a delete_by_query_rethrottle request - * Changes the number of requests per second for a particular Delete By Query operation. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html - */ - return function deleteByQueryRethrottle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['task_id'] == null && params['taskId'] == null) { - const err = new ConfigurationError('Missing required parameter: task_id or taskId') - return handleError(err, callback) - } - if (params['requests_per_second'] == null && params['requestsPerSecond'] == null) { - const err = new ConfigurationError('Missing required parameter: requests_per_second or requestsPerSecond') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, taskId, task_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_delete_by_query' + '/' + encodeURIComponent(task_id || taskId) + '/' + '_rethrottle' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDeleteByQueryRethrottle diff --git a/api/api/delete_script.js b/api/api/delete_script.js deleted file mode 100644 index edf6ed203..000000000 --- a/api/api/delete_script.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
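`delete_by_query_rethrottle`, removed just above, is unusual in validating a querystring parameter rather than only URL parts: both the task id and `requests_per_second` must be present before the request is built. Sketch (the task id is illustrative):

await client.deleteByQueryRethrottle({
  taskId: 'oTUltX4IQMOUUVeiohTt8A:124', // illustrative task id
  requestsPerSecond: 50                 // rewritten to requests_per_second
})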
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildDeleteScript (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a delete_script request - * Deletes a script. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html - */ - return function deleteScript (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_scripts' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildDeleteScript diff --git a/api/api/enrich.delete_policy.js b/api/api/enrich.delete_policy.js deleted file mode 100644 index 790e0aec4..000000000 --- a/api/api/enrich.delete_policy.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildEnrichDeletePolicy (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a enrich.delete_policy request - * Deletes an existing enrich policy and its enrich index. 
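Every builder in this file repeats the same guard: if `options.headers` is supplied it must be an object, otherwise the call short-circuits with a ConfigurationError before any request is made. Per-request headers therefore travel on the options argument, for example with the `delete_script` builder just removed:

await client.deleteScript(
  { id: 'my-stored-script' },                    // illustrative script id
  { headers: { 'x-opaque-id': 'cleanup-job' } }  // must be an object
)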
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-policy-api.html - */ - return function enrichDeletePolicy (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_enrich' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildEnrichDeletePolicy diff --git a/api/api/enrich.execute_policy.js b/api/api/enrich.execute_policy.js deleted file mode 100644 index 76dc6555b..000000000 --- a/api/api/enrich.execute_policy.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildEnrichExecutePolicy (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_completion' - ] - - const snakeCase = { - waitForCompletion: 'wait_for_completion' - } - - /** - * Perform a enrich.execute_policy request - * Creates the enrich index for an existing enrich policy. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/execute-enrich-policy-api.html - */ - return function enrichExecutePolicy (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_enrich' + '/' + 'policy' + '/' + encodeURIComponent(name) + '/' + '_execute' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildEnrichExecutePolicy diff --git a/api/api/enrich.get_policy.js b/api/api/enrich.get_policy.js deleted file mode 100644 index 66dc936c3..000000000 --- a/api/api/enrich.get_policy.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildEnrichGetPolicy (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a enrich.get_policy request - * Gets information about an enrich policy. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-enrich-policy-api.html - */ - return function enrichGetPolicy (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_enrich' + '/' + 'policy' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_enrich' + '/' + 'policy' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildEnrichGetPolicy diff --git a/api/api/enrich.put_policy.js b/api/api/enrich.put_policy.js deleted file mode 100644 index 8e5ce4423..000000000 --- a/api/api/enrich.put_policy.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildEnrichPutPolicy (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a enrich.put_policy request - * Creates a new enrich policy. 
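Here both name and body are required (a missing body is rejected with a ConfigurationError before any request is made). A sketch with an illustrative match policy, reusing the same client:

    client.enrich.putPolicy({
      name: 'users-policy',
      body: {
        match: {
          indices: 'users',
          match_field: 'email',
          enrich_fields: ['first_name', 'last_name']
        }
      }
    }, (err, result) => {
      if (err) console.error(err)
    })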
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/put-enrich-policy-api.html - */ - return function enrichPutPolicy (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_enrich' + '/' + 'policy' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildEnrichPutPolicy diff --git a/api/api/enrich.stats.js b/api/api/enrich.stats.js deleted file mode 100644 index eef1acb3a..000000000 --- a/api/api/enrich.stats.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildEnrichStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a enrich.stats request - * Gets enrich coordinator statistics and information about enrich policies that are currently executing. 
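This endpoint takes no parameters, and the builder accepts the callback in the params position, so the shortest sketch is:

    client.enrich.stats((err, result) => {
      if (!err) console.log(result.body.coordinator_stats)
    })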
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-stats-api.html - */ - return function enrichStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_enrich' + '/' + '_stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildEnrichStats diff --git a/api/api/eql.search.js b/api/api/eql.search.js deleted file mode 100644 index 576846fce..000000000 --- a/api/api/eql.search.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildEqlSearch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a eql.search request - * Returns results matching a query expressed in Event Query Language (EQL) - * https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html - */ - return function eqlSearch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_eql' + '/' + 'search' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildEqlSearch diff --git a/api/api/exists.js b/api/api/exists.js deleted file mode 100644 index 7f24bef7e..000000000 --- a/api/api/exists.js +++ /dev/null @@ -1,105 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildExists (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'stored_fields', - 'preference', - 'realtime', - 'refresh', - 'routing', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'version', - 'version_type', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - storedFields: 'stored_fields', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - versionType: 'version_type', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a exists request - * Returns information about whether a document exists in an index. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html - */ - return function exists (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + '_doc' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildExists diff --git a/api/api/exists_source.js b/api/api/exists_source.js deleted file mode 100644 index fab3f6d85..000000000 --- a/api/api/exists_source.js +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildExistsSource (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'preference', - 'realtime', - 'refresh', - 'routing', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'version', - 'version_type', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - versionType: 'version_type', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a exists_source request - * Returns information about whether a document source exists in an index. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html - */ - return function existsSource (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // check required url components - if (params['id'] != null && (params['type'] == null || params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: type, index') - return handleError(err, callback) - } else if (params['type'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (type) != null && (id) != null) { - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + encodeURIComponent(id) + '/' + '_source' - } else { - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + '_source' + '/' + encodeURIComponent(id) - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildExistsSource diff --git a/api/api/explain.js b/api/api/explain.js deleted file mode 100644 index 108a3fa25..000000000 --- a/api/api/explain.js +++ /dev/null @@ -1,108 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. 
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildExplain (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'analyze_wildcard', - 'analyzer', - 'default_operator', - 'df', - 'stored_fields', - 'lenient', - 'preference', - 'q', - 'routing', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - analyzeWildcard: 'analyze_wildcard', - defaultOperator: 'default_operator', - storedFields: 'stored_fields', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform an explain request - * Returns information about why a specific document matches (or doesn't match) a query. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html - */ - return function explain (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_explain' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildExplain diff --git a/api/api/field_caps.js b/api/api/field_caps.js deleted file mode 100644 index 94e2f601b..000000000 --- a/api/api/field_caps.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildFieldCaps (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'fields', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'include_unmapped', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - includeUnmapped: 'include_unmapped', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a field_caps request - * Returns the information about the capabilities of fields among multiple indices. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html - */ - return function fieldCaps (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_field_caps' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_field_caps' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildFieldCaps diff --git a/api/api/get.js b/api/api/get.js deleted file mode 100644 index 58e76212b..000000000 --- a/api/api/get.js +++ /dev/null @@ -1,105 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildGet (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'stored_fields', - 'preference', - 'realtime', - 'refresh', - 'routing', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'version', - 'version_type', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - storedFields: 'stored_fields', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - versionType: 'version_type', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a get request - * Returns a document. 
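A sketch of the corresponding call on the legacy client; index and id are required, the values here are illustrative, and the document comes back under result.body:

    client.get({ index: 'my-index', id: '1' }, (err, result) => {
      if (!err) console.log(result.body._source)
    })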
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html - */ - return function get (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_doc' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildGet diff --git a/api/api/get_script.js b/api/api/get_script.js deleted file mode 100644 index 0531b98a2..000000000 --- a/api/api/get_script.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildGetScript (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a get_script request - * Returns a script. 
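A sketch for fetching a stored script, reusing the same client; id is required and the value is illustrative:

    client.getScript({ id: 'my-stored-script' }, (err, result) => {
      if (!err) console.log(result.body.script)
    })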
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html - */ - return function getScript (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_scripts' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildGetScript diff --git a/api/api/get_script_context.js b/api/api/get_script_context.js deleted file mode 100644 index 1edc10047..000000000 --- a/api/api/get_script_context.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildGetScriptContext (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a get_script_context request - * Returns all script contexts. - * https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html - */ - return function getScriptContext (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_script_context' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildGetScriptContext diff --git a/api/api/get_script_languages.js b/api/api/get_script_languages.js deleted file mode 100644 index 38f70452b..000000000 --- a/api/api/get_script_languages.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildGetScriptLanguages (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a get_script_languages request - * Returns available script types, languages and contexts - * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html - */ - return function getScriptLanguages (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_script_language' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildGetScriptLanguages diff --git a/api/api/get_source.js b/api/api/get_source.js deleted file mode 100644 index 8712667cb..000000000 --- a/api/api/get_source.js +++ /dev/null @@ -1,103 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildGetSource (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'preference', - 'realtime', - 'refresh', - 'routing', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'version', - 'version_type', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - versionType: 'version_type', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a get_source request - * Returns the source of a document. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html - */ - return function getSource (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_source' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildGetSource diff --git a/api/api/graph.explore.js b/api/api/graph.explore.js deleted file mode 100644 index 31a1ed011..000000000 --- a/api/api/graph.explore.js +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildGraphExplore (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'routing', - 'timeout' - ] - - const snakeCase = { - - } - - /** - * Perform a graph.explore request - * Explore extracted and summarized information about the documents and terms in an index. 
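A sketch of an exploration request, assuming an illustrative index and field names; the body follows the Graph API's query/vertices/connections shape:

    client.graph.explore({
      index: 'logs',
      body: {
        query: { match: { 'user.name': 'alice' } },
        vertices: [{ field: 'user.name' }],
        connections: { vertices: [{ field: 'source.ip' }] }
      }
    }, (err, result) => {
      if (!err) console.log(result.body.vertices, result.body.connections)
    })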
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html - */ - return function graphExplore (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_graph' + '/' + 'explore' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildGraphExplore diff --git a/api/api/ilm.delete_lifecycle.js b/api/api/ilm.delete_lifecycle.js deleted file mode 100644 index 798cf7871..000000000 --- a/api/api/ilm.delete_lifecycle.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmDeleteLifecycle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ilm.delete_lifecycle request - * Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted. 
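Note the required parameter is policy, not name. A sketch with an illustrative policy name; the call fails if any index still uses the policy:

    client.ilm.deleteLifecycle({ policy: 'hot-warm-delete' }, (err, result) => {
      if (err) console.error(err)
    })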
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-delete-lifecycle.html - */ - return function ilmDeleteLifecycle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['policy'] == null) { - const err = new ConfigurationError('Missing required parameter: policy') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, policy, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ilm' + '/' + 'policy' + '/' + encodeURIComponent(policy) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmDeleteLifecycle diff --git a/api/api/ilm.explain_lifecycle.js b/api/api/ilm.explain_lifecycle.js deleted file mode 100644 index d2e85be72..000000000 --- a/api/api/ilm.explain_lifecycle.js +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmExplainLifecycle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'only_managed', - 'only_errors' - ] - - const snakeCase = { - onlyManaged: 'only_managed', - onlyErrors: 'only_errors' - } - - /** - * Perform a ilm.explain_lifecycle request - * Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step. 
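A sketch; index is required, and the camelCase options map onto only_managed/only_errors through the snakeCase table above:

    client.ilm.explainLifecycle({ index: 'my-index', onlyErrors: true }, (err, result) => {
      if (!err) console.log(result.body.indices)
    })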
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-explain-lifecycle.html - */ - return function ilmExplainLifecycle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_ilm' + '/' + 'explain' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmExplainLifecycle diff --git a/api/api/ilm.get_lifecycle.js b/api/api/ilm.get_lifecycle.js deleted file mode 100644 index bd82e7423..000000000 --- a/api/api/ilm.get_lifecycle.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmGetLifecycle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ilm.get_lifecycle request - * Returns the specified policy definition. Includes the policy version and last modified date. 
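As with the enrich policy getter, policy is optional here; omitting it returns every policy definition. A sketch with an illustrative name:

    client.ilm.getLifecycle({ policy: 'hot-warm-delete' }, (err, result) => {
      if (!err) console.log(result.body)
    })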
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-lifecycle.html - */ - return function ilmGetLifecycle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, policy, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((policy) != null) { - if (method == null) method = 'GET' - path = '/' + '_ilm' + '/' + 'policy' + '/' + encodeURIComponent(policy) - } else { - if (method == null) method = 'GET' - path = '/' + '_ilm' + '/' + 'policy' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmGetLifecycle diff --git a/api/api/ilm.get_status.js b/api/api/ilm.get_status.js deleted file mode 100644 index 13a26bfd8..000000000 --- a/api/api/ilm.get_status.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmGetStatus (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ilm.get_status request - * Retrieves the current index lifecycle management (ILM) status. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-get-status.html - */ - return function ilmGetStatus (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_ilm' + '/' + 'status' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmGetStatus diff --git a/api/api/ilm.move_to_step.js b/api/api/ilm.move_to_step.js deleted file mode 100644 index 5ce32fcd9..000000000 --- a/api/api/ilm.move_to_step.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmMoveToStep (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ilm.move_to_step request - * Manually moves an index into the specified step and executes that step. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-move-to-step.html - */ - return function ilmMoveToStep (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ilm' + '/' + 'move' + '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmMoveToStep diff --git a/api/api/ilm.put_lifecycle.js b/api/api/ilm.put_lifecycle.js deleted file mode 100644 index 301484462..000000000 --- a/api/api/ilm.put_lifecycle.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmPutLifecycle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ilm.put_lifecycle request - * Creates a lifecycle policy - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-put-lifecycle.html - */ - return function ilmPutLifecycle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['policy'] == null) { - const err = new ConfigurationError('Missing required parameter: policy') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, policy, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ilm' + '/' + 'policy' + '/' + encodeURIComponent(policy) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmPutLifecycle diff --git a/api/api/ilm.remove_policy.js b/api/api/ilm.remove_policy.js deleted file mode 100644 index cf3d650af..000000000 --- a/api/api/ilm.remove_policy.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmRemovePolicy (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ilm.remove_policy request - * Removes the assigned lifecycle policy and stops managing the specified index - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html - */ - return function ilmRemovePolicy (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ilm' + '/' + 'remove' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmRemovePolicy diff --git a/api/api/ilm.retry.js b/api/api/ilm.retry.js deleted file mode 100644 index 92c2441ff..000000000 --- a/api/api/ilm.retry.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmRetry (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ilm.retry request - * Retries executing the policy for an index that is in the ERROR step. 
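A one-call sketch for kicking a stuck index out of the ERROR step, index name illustrative:

    client.ilm.retry({ index: 'my-index' }, (err, result) => {
      if (err) console.error(err)
    })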
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html - */ - return function ilmRetry (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_ilm' + '/' + 'retry' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmRetry diff --git a/api/api/ilm.start.js b/api/api/ilm.start.js deleted file mode 100644 index 0cfa45824..000000000 --- a/api/api/ilm.start.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmStart (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ilm.start request - * Start the index lifecycle management (ILM) plugin. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-start.html - */ - return function ilmStart (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ilm' + '/' + 'start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmStart diff --git a/api/api/ilm.stop.js b/api/api/ilm.stop.js deleted file mode 100644 index 1d28947e7..000000000 --- a/api/api/ilm.stop.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIlmStop (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ilm.stop request - * Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-stop.html - */ - return function ilmStop (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ilm' + '/' + 'stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIlmStop diff --git a/api/api/index.js b/api/api/index.js deleted file mode 100644 index 334e60b1c..000000000 --- a/api/api/index.js +++ /dev/null @@ -1,107 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndex (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_active_shards', - 'op_type', - 'refresh', - 'routing', - 'timeout', - 'version', - 'version_type', - 'if_seq_no', - 'if_primary_term', - 'pipeline', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitForActiveShards: 'wait_for_active_shards', - opType: 'op_type', - versionType: 'version_type', - ifSeqNo: 'if_seq_no', - ifPrimaryTerm: 'if_primary_term', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a index request - * Creates or updates a document in an index. 
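The path branching below is the interesting part: an explicit id issues PUT /{index}/_doc/{id}, while omitting it issues POST /{index}/_doc and lets Elasticsearch assign one. A sketch of both, values illustrative:

    client.index({ index: 'my-index', id: '1', body: { user: 'alice' } }, (err, result) => {
      if (!err) console.log(result.body.result) // 'created' or 'updated'
    })
    client.index({ index: 'my-index', body: { user: 'bob' } }, (err, result) => {
      if (!err) console.log(result.body._id) // auto-generated id
    })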
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html - */ - return function _index (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (id) != null) { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_doc' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_doc' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndex diff --git a/api/api/indices.analyze.js b/api/api/indices.analyze.js deleted file mode 100644 index 609c29b4c..000000000 --- a/api/api/indices.analyze.js +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesAnalyze (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'index', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.analyze request - * Performs the analysis process on a text and return the tokens breakdown of the text. 
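
The _index builder above derives both the HTTP verb and the path from whether an id was supplied: with an id it issues PUT /{index}/_doc/{id}, without one it falls back to POST /{index}/_doc and lets Elasticsearch assign the id. A usage sketch, reusing the client from the earlier sketch (index name and document are illustrative):

  // PUT /my-index/_doc/1 — explicit id, idempotent overwrite
  client.index({ index: 'my-index', id: '1', body: { title: 'hello' } }, (err, result) => {
    if (err) console.error(err)
  })

  // POST /my-index/_doc — Elasticsearch generates the id
  client.index({ index: 'my-index', body: { title: 'hello' } }, (err, result) => {
    if (err) console.error(err)
  })
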
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html - */ - return function indicesAnalyze (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_analyze' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_analyze' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesAnalyze diff --git a/api/api/indices.clear_cache.js b/api/api/indices.clear_cache.js deleted file mode 100644 index 634b51cd9..000000000 --- a/api/api/indices.clear_cache.js +++ /dev/null @@ -1,93 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesClearCache (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'fielddata', - 'fields', - 'query', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'index', - 'request', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.clear_cache request - * Clears all or specific caches for one or more indices. 
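
In indicesAnalyze above, the default verb depends on the presence of a body (method = body == null ? 'GET' : 'POST') and the path on the presence of an index. A sketch of the body-bearing shapes (analyzer and text are illustrative; without a body the builder would default to GET, though the server still requires text to analyze):

  // POST /my-index/_analyze — a body forces POST
  client.indices.analyze({
    index: 'my-index',
    body: { analyzer: 'standard', text: 'The quick brown fox' }
  }, (err, result) => {})

  // POST /_analyze — no index, so the unscoped path is used
  client.indices.analyze({
    body: { analyzer: 'standard', text: 'The quick brown fox' }
  }, (err, result) => {})
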
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html - */ - return function indicesClearCache (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_cache' + '/' + 'clear' - } else { - if (method == null) method = 'POST' - path = '/' + '_cache' + '/' + 'clear' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesClearCache diff --git a/api/api/indices.clone.js b/api/api/indices.clone.js deleted file mode 100644 index e9539bc9d..000000000 --- a/api/api/indices.clone.js +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesClone (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'wait_for_active_shards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - waitForActiveShards: 'wait_for_active_shards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.clone request - * Clones an index - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html - */ - return function indicesClone (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['target'] == null) { - const err = new ConfigurationError('Missing required parameter: target') - return handleError(err, callback) - } - - // check required url components - if (params['target'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof 
options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, target, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_clone' + '/' + encodeURIComponent(target) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesClone diff --git a/api/api/indices.close.js b/api/api/indices.close.js deleted file mode 100644 index d44ed86b5..000000000 --- a/api/api/indices.close.js +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesClose (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'wait_for_active_shards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - waitForActiveShards: 'wait_for_active_shards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.close request - * Closes an index. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html - */ - return function indicesClose (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_close' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesClose diff --git a/api/api/indices.create.js b/api/api/indices.create.js deleted file mode 100644 index e720bd152..000000000 --- a/api/api/indices.create.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesCreate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_active_shards', - 'timeout', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitForActiveShards: 'wait_for_active_shards', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.create request - * Creates an index with optional settings and mappings. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html - */ - return function indicesCreate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesCreate diff --git a/api/api/indices.create_data_stream.js b/api/api/indices.create_data_stream.js deleted file mode 100644 index 017720d9f..000000000 --- a/api/api/indices.create_data_stream.js +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesCreateDataStream (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.create_data_stream request - * Creates or updates a data stream - * https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html - */ - return function indicesCreateDataStream (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_data_stream' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesCreateDataStream diff --git a/api/api/indices.delete.js b/api/api/indices.delete.js deleted file mode 100644 index e10b4b41e..000000000 --- a/api/api/indices.delete.js +++ /dev/null @@ -1,92 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesDelete (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.delete request - * Deletes an index. 
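
A detail shared by all of these builders: snakeCaseKeys rewrites camelCase parameter names to the snake_case keys listed in acceptedQuerystring, so callers could use either convention. A sketch against buildIndicesCreate above (values are illustrative):

  // both calls serialize to ?wait_for_active_shards=2&master_timeout=30s
  client.indices.create({ index: 'logs', waitForActiveShards: 2, masterTimeout: '30s' }, (err, result) => {})
  client.indices.create({ index: 'logs', wait_for_active_shards: 2, master_timeout: '30s' }, (err, result) => {})
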
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html - */ - return function indicesDelete (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesDelete diff --git a/api/api/indices.delete_alias.js b/api/api/indices.delete_alias.js deleted file mode 100644 index 7f5da226b..000000000 --- a/api/api/indices.delete_alias.js +++ /dev/null @@ -1,101 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesDeleteAlias (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.delete_alias request - * Deletes an alias. 
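
Each builder also normalizes a numeric options.ignore into an array before handing it to the transport, which then treats those status codes as non-errors. Sketch (index name illustrative):

  // ignore: 404 becomes ignore: [404]; deleting a missing index is then not an error
  client.indices.delete({ index: 'old-logs' }, { ignore: 404 }, (err, result) => {
    if (!err) console.log(result.statusCode) // 200 or 404
  })
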
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html - */ - return function indicesDeleteAlias (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // check required url components - if (params['name'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (name) != null) { - if (method == null) method = 'DELETE' - path = '/' + encodeURIComponent(index) + '/' + '_alias' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'DELETE' - path = '/' + encodeURIComponent(index) + '/' + '_aliases' + '/' + encodeURIComponent(name) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesDeleteAlias diff --git a/api/api/indices.delete_data_stream.js b/api/api/indices.delete_data_stream.js deleted file mode 100644 index 10e5d6fe1..000000000 --- a/api/api/indices.delete_data_stream.js +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesDeleteDataStream (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.delete_data_stream request - * Deletes a data stream. 
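
buildIndicesDeleteAlias above branches between /_alias/ and /_aliases/ paths, but since both index and name are enforced as required parameters first, the /_aliases/ fallback appears unreachable in practice. Usage sketch (names illustrative):

  // DELETE /my-index/_alias/my-alias
  client.indices.deleteAlias({ index: 'my-index', name: 'my-alias' }, (err, result) => {})
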
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html - */ - return function indicesDeleteDataStream (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_data_stream' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesDeleteDataStream diff --git a/api/api/indices.delete_index_template.js b/api/api/indices.delete_index_template.js deleted file mode 100644 index 94ca1beb0..000000000 --- a/api/api/indices.delete_index_template.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesDeleteIndexTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.delete_index_template request - * Deletes an index template. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesDeleteIndexTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_index_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesDeleteIndexTemplate diff --git a/api/api/indices.delete_template.js b/api/api/indices.delete_template.js deleted file mode 100644 index d4900659a..000000000 --- a/api/api/indices.delete_template.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesDeleteTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.delete_template request - * Deletes an index template. 
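
This pair of builders covers the two template generations: buildIndicesDeleteIndexTemplate above targets composable index templates (/_index_template, introduced in Elasticsearch 7.8), while the buildIndicesDeleteTemplate that follows targets legacy templates (/_template). A sketch of the contrast (template name illustrative):

  // DELETE /_index_template/my-template — composable template
  client.indices.deleteIndexTemplate({ name: 'my-template' }, (err, result) => {})

  // DELETE /_template/my-template — legacy template
  client.indices.deleteTemplate({ name: 'my-template' }, (err, result) => {})
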
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesDeleteTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesDeleteTemplate diff --git a/api/api/indices.exists.js b/api/api/indices.exists.js deleted file mode 100644 index aadaca04c..000000000 --- a/api/api/indices.exists.js +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesExists (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'local', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'flat_settings', - 'include_defaults', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - flatSettings: 'flat_settings', - includeDefaults: 'include_defaults', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.exists request - * Returns information about whether a particular index exists. 
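
As with every required-parameter check in these files, omitting index short-circuits before any network activity: the callback receives a ConfigurationError and no HTTP request is made. Sketch:

  client.indices.exists({}, (err, result) => {
    // err is a ConfigurationError('Missing required parameter: index');
    // the transport was never invoked
    console.error(err && err.message)
  })
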
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html - */ - return function indicesExists (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesExists diff --git a/api/api/indices.exists_alias.js b/api/api/indices.exists_alias.js deleted file mode 100644 index 06ad61598..000000000 --- a/api/api/indices.exists_alias.js +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesExistsAlias (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.exists_alias request - * Returns information about whether a particular alias exists. 
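
buildIndicesExists above issues a HEAD request and builds it with body: null (the existence APIs carry no payload); the answer is conveyed by the status code rather than a response document. Sketch, pairing it with ignore so a missing index is not surfaced as an error:

  client.indices.exists({ index: 'my-index' }, { ignore: [404] }, (err, result) => {
    if (!err) console.log(result.statusCode === 200) // true when the index exists
  })
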
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html - */ - return function indicesExistsAlias (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (name) != null) { - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + '_alias' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'HEAD' - path = '/' + '_alias' + '/' + encodeURIComponent(name) - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesExistsAlias diff --git a/api/api/indices.exists_index_template.js b/api/api/indices.exists_index_template.js deleted file mode 100644 index 5bb126810..000000000 --- a/api/api/indices.exists_index_template.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesExistsIndexTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'flat_settings', - 'master_timeout', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - flatSettings: 'flat_settings', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.exists_index_template request - * Returns information about whether a particular index template exists. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesExistsIndexTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'HEAD' - path = '/' + '_index_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesExistsIndexTemplate diff --git a/api/api/indices.exists_template.js b/api/api/indices.exists_template.js deleted file mode 100644 index b1baa6ad1..000000000 --- a/api/api/indices.exists_template.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesExistsTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'flat_settings', - 'master_timeout', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - flatSettings: 'flat_settings', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.exists_template request - * Returns information about whether a particular index template exists. 
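
These builders also validate that a per-request options.headers, when given, is a plain object before it reaches the transport. Sketch with a custom header (the x-opaque-id value is illustrative):

  client.indices.existsIndexTemplate(
    { name: 'my-template' },
    { ignore: [404], headers: { 'x-opaque-id': 'audit-42' } },
    (err, result) => {}
  )
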
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesExistsTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'HEAD' - path = '/' + '_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesExistsTemplate diff --git a/api/api/indices.exists_type.js b/api/api/indices.exists_type.js deleted file mode 100644 index d19c3d688..000000000 --- a/api/api/indices.exists_type.js +++ /dev/null @@ -1,100 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesExistsType (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.exists_type request - * Returns information about whether a particular document type exists. 
(DEPRECATED) - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-types-exists.html - */ - return function indicesExistsType (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['type'] == null) { - const err = new ConfigurationError('Missing required parameter: type') - return handleError(err, callback) - } - - // check required url components - if (params['type'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'HEAD' - path = '/' + encodeURIComponent(index) + '/' + '_mapping' + '/' + encodeURIComponent(type) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesExistsType diff --git a/api/api/indices.flush.js b/api/api/indices.flush.js deleted file mode 100644 index 4349c8e65..000000000 --- a/api/api/indices.flush.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesFlush (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'force', - 'wait_if_ongoing', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitIfOngoing: 'wait_if_ongoing', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.flush request - * Performs the flush operation on one or more indices. 
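
buildIndicesExistsType above is the builder flagged (DEPRECATED) in its own doc comment: it addresses /{index}/_mapping/{type}, and mapping types were deprecated through Elasticsearch 7.x and removed in 8.0, taking the endpoint with them. Sketch of the legacy call shape (names illustrative):

  // HEAD /my-index/_mapping/_doc
  client.indices.existsType({ index: 'my-index', type: '_doc' }, { ignore: [404] }, (err, result) => {})
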
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html - */ - return function indicesFlush (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_flush' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_flush' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesFlush diff --git a/api/api/indices.forcemerge.js b/api/api/indices.forcemerge.js deleted file mode 100644 index b3e2c3d1d..000000000 --- a/api/api/indices.forcemerge.js +++ /dev/null @@ -1,93 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesForcemerge (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'flush', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'max_num_segments', - 'only_expunge_deletes', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - maxNumSegments: 'max_num_segments', - onlyExpungeDeletes: 'only_expunge_deletes', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.forcemerge request - * Performs the force merge operation on one or more indices. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html - */ - return function indicesForcemerge (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_forcemerge' - } else { - if (method == null) method = 'POST' - path = '/' + '_forcemerge' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesForcemerge diff --git a/api/api/indices.freeze.js b/api/api/indices.freeze.js deleted file mode 100644 index 033335bc3..000000000 --- a/api/api/indices.freeze.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesFreeze (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'wait_for_active_shards' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - waitForActiveShards: 'wait_for_active_shards' - } - - /** - * Perform a indices.freeze request - * Freezes an index. A frozen index has almost no overhead on the cluster (except for maintaining its metadata in memory) and is read-only. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/freeze-index-api.html - */ - return function indicesFreeze (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_freeze' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesFreeze diff --git a/api/api/indices.get.js b/api/api/indices.get.js deleted file mode 100644 index b34ff7717..000000000 --- a/api/api/indices.get.js +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesGet (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'local', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'flat_settings', - 'include_defaults', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - flatSettings: 'flat_settings', - includeDefaults: 'include_defaults', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.get request - * Returns information about one or more indices. 
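
Unlike most builders in this diff, buildIndicesFreeze above omits the common pretty/human/error_trace/source/filter_path entries from acceptedQuerystring, a small inconsistency in the generated output. Usage sketch (index name illustrative):

  // POST /archived-logs/_freeze
  client.indices.freeze({ index: 'archived-logs' }, (err, result) => {})
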
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html - */ - return function indicesGet (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesGet diff --git a/api/api/indices.get_alias.js b/api/api/indices.get_alias.js deleted file mode 100644 index 4cbdb2fff..000000000 --- a/api/api/indices.get_alias.js +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesGetAlias (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.get_alias request - * Returns an alias. 
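
A pattern split worth noting: read-only builders such as indicesGet above construct the request with body: null, while the mutating builders default to body: body || ''. Sketch:

  // GET /my-index — no request body is ever attached
  client.indices.get({ index: 'my-index' }, (err, result) => {
    if (!err) console.log(Object.keys(result.body)) // one key per matched index
  })
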
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html - */ - return function indicesGetAlias (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (name) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_alias' + '/' + encodeURIComponent(name) - } else if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_alias' + '/' + encodeURIComponent(name) - } else if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_alias' - } else { - if (method == null) method = 'GET' - path = '/' + '_alias' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesGetAlias diff --git a/api/api/indices.get_data_stream.js b/api/api/indices.get_data_stream.js deleted file mode 100644 index 002024fc9..000000000 --- a/api/api/indices.get_data_stream.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesGetDataStream (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.get_data_stream request - * Returns data streams. 
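
buildIndicesGetAlias above enumerates all four index/name combinations; the resulting paths are easiest to read side by side (names illustrative):

  const cb = (err, result) => { if (err) console.error(err) }
  client.indices.getAlias({ index: 'my-index', name: 'my-alias' }, cb) // GET /my-index/_alias/my-alias
  client.indices.getAlias({ name: 'my-alias' }, cb)                    // GET /_alias/my-alias
  client.indices.getAlias({ index: 'my-index' }, cb)                   // GET /my-index/_alias
  client.indices.getAlias({}, cb)                                      // GET /_alias
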
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html - */ - return function indicesGetDataStream (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_data_stream' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_data_stream' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesGetDataStream diff --git a/api/api/indices.get_field_mapping.js b/api/api/indices.get_field_mapping.js deleted file mode 100644 index 14ef3c26b..000000000 --- a/api/api/indices.get_field_mapping.js +++ /dev/null @@ -1,97 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesGetFieldMapping (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'include_defaults', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - includeDefaults: 'include_defaults', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.get_field_mapping request - * Returns mapping for one or more fields. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html - */ - return function indicesGetFieldMapping (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['fields'] == null) { - const err = new ConfigurationError('Missing required parameter: fields') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, fields, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (fields) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_mapping' + '/' + 'field' + '/' + encodeURIComponent(fields) - } else { - if (method == null) method = 'GET' - path = '/' + '_mapping' + '/' + 'field' + '/' + encodeURIComponent(fields) - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesGetFieldMapping diff --git a/api/api/indices.get_index_template.js b/api/api/indices.get_index_template.js deleted file mode 100644 index 5464faef2..000000000 --- a/api/api/indices.get_index_template.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesGetIndexTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'flat_settings', - 'master_timeout', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - flatSettings: 'flat_settings', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.get_index_template request - * Returns an index template. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesGetIndexTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_index_template' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_index_template' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesGetIndexTemplate diff --git a/api/api/indices.get_mapping.js b/api/api/indices.get_mapping.js deleted file mode 100644 index 8c03a7485..000000000 --- a/api/api/indices.get_mapping.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesGetMapping (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'master_timeout', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.get_mapping request - * Returns mappings for one or more indices. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html - */ - return function indicesGetMapping (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_mapping' - } else { - if (method == null) method = 'GET' - path = '/' + '_mapping' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesGetMapping diff --git a/api/api/indices.get_settings.js b/api/api/indices.get_settings.js deleted file mode 100644 index c4e60fef4..000000000 --- a/api/api/indices.get_settings.js +++ /dev/null @@ -1,101 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesGetSettings (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'flat_settings', - 'local', - 'include_defaults', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - flatSettings: 'flat_settings', - includeDefaults: 'include_defaults', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.get_settings request - * Returns settings for one or more indices. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html - */ - return function indicesGetSettings (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (name) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_settings' + '/' + encodeURIComponent(name) - } else if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_settings' - } else if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_settings' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_settings' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesGetSettings diff --git a/api/api/indices.get_template.js b/api/api/indices.get_template.js deleted file mode 100644 index ffdc93363..000000000 --- a/api/api/indices.get_template.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesGetTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'flat_settings', - 'master_timeout', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - flatSettings: 'flat_settings', - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.get_template request - * Returns an index template. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesGetTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_template' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_template' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesGetTemplate diff --git a/api/api/indices.get_upgrade.js b/api/api/indices.get_upgrade.js deleted file mode 100644 index 13598cfe1..000000000 --- a/api/api/indices.get_upgrade.js +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesGetUpgrade (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.get_upgrade request - * DEPRECATED Returns a progress status of current upgrade. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-upgrade.html - */ - return function indicesGetUpgrade (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_upgrade' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesGetUpgrade diff --git a/api/api/indices.open.js b/api/api/indices.open.js deleted file mode 100644 index 1fa3455eb..000000000 --- a/api/api/indices.open.js +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesOpen (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'wait_for_active_shards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - waitForActiveShards: 'wait_for_active_shards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.open request - * Opens an index. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html - */ - return function indicesOpen (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_open' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesOpen diff --git a/api/api/indices.put_alias.js b/api/api/indices.put_alias.js deleted file mode 100644 index 4e72de0d6..000000000 --- a/api/api/indices.put_alias.js +++ /dev/null @@ -1,101 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesPutAlias (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.put_alias request - * Creates or updates an alias. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html - */ - return function indicesPutAlias (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // check required url components - if (params['name'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (name) != null) { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_alias' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_aliases' + '/' + encodeURIComponent(name) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesPutAlias diff --git a/api/api/indices.put_index_template.js b/api/api/indices.put_index_template.js deleted file mode 100644 index 3d088a83e..000000000 --- a/api/api/indices.put_index_template.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesPutIndexTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'create', - 'cause', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.put_index_template request - * Creates or updates an index template. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesPutIndexTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_index_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesPutIndexTemplate diff --git a/api/api/indices.put_mapping.js b/api/api/indices.put_mapping.js deleted file mode 100644 index 5f162e5df..000000000 --- a/api/api/indices.put_mapping.js +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesPutMapping (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.put_mapping request - * Updates the index mappings. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html - */ - return function indicesPutMapping (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_mapping' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesPutMapping diff --git a/api/api/indices.put_settings.js b/api/api/indices.put_settings.js deleted file mode 100644 index f2f67e2a4..000000000 --- a/api/api/indices.put_settings.js +++ /dev/null @@ -1,101 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesPutSettings (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'timeout', - 'preserve_existing', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'flat_settings', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - preserveExisting: 'preserve_existing', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - flatSettings: 'flat_settings', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.put_settings request - * Updates the index settings. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html - */ - return function indicesPutSettings (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_settings' - } else { - if (method == null) method = 'PUT' - path = '/' + '_settings' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesPutSettings diff --git a/api/api/indices.put_template.js b/api/api/indices.put_template.js deleted file mode 100644 index 9574a20f2..000000000 --- a/api/api/indices.put_template.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesPutTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'order', - 'create', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.put_template request - * Creates or updates an index template. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesPutTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_template' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesPutTemplate diff --git a/api/api/indices.recovery.js b/api/api/indices.recovery.js deleted file mode 100644 index e29dc818d..000000000 --- a/api/api/indices.recovery.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesRecovery (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'detailed', - 'active_only', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - activeOnly: 'active_only', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.recovery request - * Returns information about ongoing index shard recoveries. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html - */ - return function indicesRecovery (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_recovery' - } else { - if (method == null) method = 'GET' - path = '/' + '_recovery' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesRecovery diff --git a/api/api/indices.refresh.js b/api/api/indices.refresh.js deleted file mode 100644 index 40a92c754..000000000 --- a/api/api/indices.refresh.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesRefresh (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.refresh request - * Performs the refresh operation in one or more indices. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html - */ - return function indicesRefresh (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_refresh' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_refresh' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesRefresh diff --git a/api/api/indices.reload_search_analyzers.js b/api/api/indices.reload_search_analyzers.js deleted file mode 100644 index f0284d279..000000000 --- a/api/api/indices.reload_search_analyzers.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesReloadSearchAnalyzers (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards' - } - - /** - * Perform a indices.reload_search_analyzers request - * Reloads an index's search analyzers and their resources. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-reload-analyzers.html - */ - return function indicesReloadSearchAnalyzers (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_reload_search_analyzers' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesReloadSearchAnalyzers diff --git a/api/api/indices.rollover.js b/api/api/indices.rollover.js deleted file mode 100644 index 5f4d0d281..000000000 --- a/api/api/indices.rollover.js +++ /dev/null @@ -1,102 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesRollover (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'dry_run', - 'master_timeout', - 'wait_for_active_shards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - dryRun: 'dry_run', - masterTimeout: 'master_timeout', - waitForActiveShards: 'wait_for_active_shards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.rollover request - * Updates an alias to point to a new index when the existing index -is considered to be too large or too old. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html - */ - return function indicesRollover (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['alias'] == null) { - const err = new ConfigurationError('Missing required parameter: alias') - return handleError(err, callback) - } - - // check required url components - if ((params['new_index'] != null || params['newIndex'] != null) && (params['alias'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: alias') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, alias, newIndex, new_index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((alias) != null && (new_index || newIndex) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(alias) + '/' + '_rollover' + '/' + encodeURIComponent(new_index || newIndex) - } else { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(alias) + '/' + '_rollover' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesRollover diff --git a/api/api/indices.segments.js b/api/api/indices.segments.js deleted file mode 100644 index d02c4b593..000000000 --- a/api/api/indices.segments.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesSegments (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'verbose', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.segments request - * Provides low-level information about segments in a Lucene index. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html - */ - return function indicesSegments (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_segments' - } else { - if (method == null) method = 'GET' - path = '/' + '_segments' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesSegments diff --git a/api/api/indices.shard_stores.js b/api/api/indices.shard_stores.js deleted file mode 100644 index 62e4fc64b..000000000 --- a/api/api/indices.shard_stores.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesShardStores (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'status', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.shard_stores request - * Provides store information for shard copies of indices. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html - */ - return function indicesShardStores (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_shard_stores' - } else { - if (method == null) method = 'GET' - path = '/' + '_shard_stores' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesShardStores diff --git a/api/api/indices.shrink.js b/api/api/indices.shrink.js deleted file mode 100644 index 8fccf0fa3..000000000 --- a/api/api/indices.shrink.js +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesShrink (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'wait_for_active_shards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - waitForActiveShards: 'wait_for_active_shards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.shrink request - * Allow to shrink an existing index into a new index with fewer primary shards. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html - */ - return function indicesShrink (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['target'] == null) { - const err = new ConfigurationError('Missing required parameter: target') - return handleError(err, callback) - } - - // check required url components - if (params['target'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, target, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_shrink' + '/' + encodeURIComponent(target) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesShrink diff --git a/api/api/indices.simulate_index_template.js b/api/api/indices.simulate_index_template.js deleted file mode 100644 index 791806076..000000000 --- a/api/api/indices.simulate_index_template.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesSimulateIndexTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'create', - 'cause', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.simulate_index_template request - * Simulate matching the given index name against the index templates in the system - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesSimulateIndexTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_index_template' + '/' + '_simulate_index' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesSimulateIndexTemplate diff --git a/api/api/indices.simulate_template.js b/api/api/indices.simulate_template.js deleted file mode 100644 index 94c22fd2e..000000000 --- a/api/api/indices.simulate_template.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesSimulateTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'create', - 'cause', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.simulate_template request - * Simulate resolving the given template name or body - * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html - */ - return function indicesSimulateTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'POST' - path = '/' + '_index_template' + '/' + '_simulate' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'POST' - path = '/' + '_index_template' + '/' + '_simulate' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesSimulateTemplate diff --git a/api/api/indices.split.js b/api/api/indices.split.js deleted file mode 100644 index da862880a..000000000 --- a/api/api/indices.split.js +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesSplit (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'wait_for_active_shards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - waitForActiveShards: 'wait_for_active_shards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.split request - * Allows you to split an existing index into a new index with more primary shards. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html - */ - return function indicesSplit (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['target'] == null) { - const err = new ConfigurationError('Missing required parameter: target') - return handleError(err, callback) - } - - // check required url components - if (params['target'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, target, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + encodeURIComponent(index) + '/' + '_split' + '/' + encodeURIComponent(target) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesSplit diff --git a/api/api/indices.stats.js b/api/api/indices.stats.js deleted file mode 100644 index 787ede172..000000000 --- a/api/api/indices.stats.js +++ /dev/null @@ -1,104 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'completion_fields', - 'fielddata_fields', - 'fields', - 'groups', - 'level', - 'types', - 'include_segment_file_sizes', - 'include_unloaded_segments', - 'expand_wildcards', - 'forbid_closed_indices', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - completionFields: 'completion_fields', - fielddataFields: 'fielddata_fields', - includeSegmentFileSizes: 'include_segment_file_sizes', - includeUnloadedSegments: 'include_unloaded_segments', - expandWildcards: 'expand_wildcards', - forbidClosedIndices: 'forbid_closed_indices', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.stats request - * Provides statistics on operations happening in an index. 
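The indicesSplit builder above validates both required parameters before issuing PUT /{index}/_split/{target}. A sketch of the equivalent call, reusing the client instance from the earlier snippet (index names and shard count are hypothetical):

client.indices.split({
  index: 'logs',
  target: 'logs-split',
  body: { settings: { 'index.number_of_shards': 4 } } // target must have >= source shards
}, (err, result) => {
  if (err) return console.error(err)
})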
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html - */ - return function indicesStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, metric, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (metric) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_stats' + '/' + encodeURIComponent(metric) - } else if ((metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_stats' + '/' + encodeURIComponent(metric) - } else if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesStats diff --git a/api/api/indices.unfreeze.js b/api/api/indices.unfreeze.js deleted file mode 100644 index 00a31a84d..000000000 --- a/api/api/indices.unfreeze.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesUnfreeze (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'wait_for_active_shards' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - waitForActiveShards: 'wait_for_active_shards' - } - - /** - * Perform a indices.unfreeze request - * Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. 
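indicesStats above shows the four-way path dispatch these generated builders share: GET /_stats, /_stats/{metric}, /{index}/_stats, or /{index}/_stats/{metric}, depending on which of index and metric are set. An illustrative call (names assumed), again with the same client:

client.indices.stats({ index: 'logs', metric: 'docs,store' }, (err, result) => {
  if (err) return console.error(err)
  console.log(result.body._all.total) // aggregated stats across all shards
})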
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html - */ - return function indicesUnfreeze (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_unfreeze' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesUnfreeze diff --git a/api/api/indices.update_aliases.js b/api/api/indices.update_aliases.js deleted file mode 100644 index 4633aaa94..000000000 --- a/api/api/indices.update_aliases.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesUpdateAliases (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.update_aliases request - * Updates index aliases. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html - */ - return function indicesUpdateAliases (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_aliases' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesUpdateAliases diff --git a/api/api/indices.upgrade.js b/api/api/indices.upgrade.js deleted file mode 100644 index 09a366e93..000000000 --- a/api/api/indices.upgrade.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesUpgrade (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_indices', - 'expand_wildcards', - 'ignore_unavailable', - 'wait_for_completion', - 'only_ancient_segments', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - ignoreUnavailable: 'ignore_unavailable', - waitForCompletion: 'wait_for_completion', - onlyAncientSegments: 'only_ancient_segments', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.upgrade request - * DEPRECATED Upgrades to the current version of Lucene. 
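indicesUpdateAliases above requires a body and always POSTs to /_aliases; the usual payload is an atomic list of actions. Sketch (alias and index names hypothetical):

client.indices.updateAliases({
  body: {
    actions: [
      // both actions apply atomically, so readers of 'logs' never see a gap
      { remove: { index: 'logs-2019', alias: 'logs' } },
      { add: { index: 'logs-2020', alias: 'logs' } }
    ]
  }
}, (err, result) => {
  if (err) return console.error(err)
})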
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-upgrade.html - */ - return function indicesUpgrade (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_upgrade' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesUpgrade diff --git a/api/api/indices.validate_query.js b/api/api/indices.validate_query.js deleted file mode 100644 index 385685b95..000000000 --- a/api/api/indices.validate_query.js +++ /dev/null @@ -1,109 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIndicesValidateQuery (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'explain', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'q', - 'analyzer', - 'analyze_wildcard', - 'default_operator', - 'df', - 'lenient', - 'rewrite', - 'all_shards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - analyzeWildcard: 'analyze_wildcard', - defaultOperator: 'default_operator', - allShards: 'all_shards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a indices.validate_query request - * Allows a user to validate a potentially expensive query without executing it. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html - */ - return function indicesValidateQuery (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required url components - if (params['type'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (type) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + '_validate' + '/' + 'query' - } else if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_validate' + '/' + 'query' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_validate' + '/' + 'query' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIndicesValidateQuery diff --git a/api/api/info.js b/api/api/info.js deleted file mode 100644 index b0d0201f2..000000000 --- a/api/api/info.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildInfo (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a info request - * Returns basic information about the cluster. 
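The info builder below simply GETs the root path; because params are optional, the generated wrapper also accepts the callback in first position:

client.info((err, result) => {
  if (err) return console.error(err)
  console.log(result.body.version.number) // cluster version string
})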
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html - */ - return function info (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildInfo diff --git a/api/api/ingest.delete_pipeline.js b/api/api/ingest.delete_pipeline.js deleted file mode 100644 index fffff5a5d..000000000 --- a/api/api/ingest.delete_pipeline.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIngestDeletePipeline (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a ingest.delete_pipeline request - * Deletes a pipeline. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html - */ - return function ingestDeletePipeline (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ingest' + '/' + 'pipeline' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIngestDeletePipeline diff --git a/api/api/ingest.get_pipeline.js b/api/api/ingest.get_pipeline.js deleted file mode 100644 index 5008d0d9a..000000000 --- a/api/api/ingest.get_pipeline.js +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIngestGetPipeline (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a ingest.get_pipeline request - * Returns a pipeline. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html - */ - return function ingestGetPipeline (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = 'GET' - path = '/' + '_ingest' + '/' + 'pipeline' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'GET' - path = '/' + '_ingest' + '/' + 'pipeline' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIngestGetPipeline diff --git a/api/api/ingest.processor_grok.js b/api/api/ingest.processor_grok.js deleted file mode 100644 index 8a1c2c883..000000000 --- a/api/api/ingest.processor_grok.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIngestProcessorGrok (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a ingest.processor_grok request - * Returns a list of the built-in patterns. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html#grok-processor-rest-get - */ - return function ingestProcessorGrok (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_ingest' + '/' + 'processor' + '/' + 'grok' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIngestProcessorGrok diff --git a/api/api/ingest.put_pipeline.js b/api/api/ingest.put_pipeline.js deleted file mode 100644 index f57a932c9..000000000 --- a/api/api/ingest.put_pipeline.js +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIngestPutPipeline (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a ingest.put_pipeline request - * Creates or updates a pipeline. 
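ingestPutPipeline (whose builder follows) requires both id and body and PUTs to /_ingest/pipeline/{id}. Sketch with a hypothetical one-processor pipeline:

client.ingest.putPipeline({
  id: 'trim-message', // pipeline id is a path segment, hence encodeURIComponent in the builder
  body: {
    description: 'trim the message field',
    processors: [{ trim: { field: 'message' } }]
  }
}, (err, result) => {
  if (err) return console.error(err)
})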
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html - */ - return function ingestPutPipeline (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ingest' + '/' + 'pipeline' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIngestPutPipeline diff --git a/api/api/ingest.simulate.js b/api/api/ingest.simulate.js deleted file mode 100644 index 30dac263c..000000000 --- a/api/api/ingest.simulate.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildIngestSimulate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'verbose', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a ingest.simulate request - * Allows to simulate a pipeline with example documents. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html - */ - return function ingestSimulate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ingest' + '/' + 'pipeline' + '/' + encodeURIComponent(id) + '/' + '_simulate' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ingest' + '/' + 'pipeline' + '/' + '_simulate' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildIngestSimulate diff --git a/api/api/license.delete.js b/api/api/license.delete.js deleted file mode 100644 index c099da457..000000000 --- a/api/api/license.delete.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
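ingestSimulate above takes an optional pipeline id; without one it POSTs to /_ingest/pipeline/_simulate with the pipeline defined inline in the body. Illustrative call:

client.ingest.simulate({
  body: {
    pipeline: { processors: [{ lowercase: { field: 'name' } }] },
    docs: [{ _source: { name: 'ELASTIC' } }] // sample documents to run through the pipeline
  }
}, (err, result) => {
  if (err) return console.error(err)
  console.log(result.body.docs) // per-document simulation results
})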
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildLicenseDelete (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a license.delete request - * Deletes licensing information for the cluster - * https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html - */ - return function licenseDelete (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_license' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildLicenseDelete diff --git a/api/api/license.get.js b/api/api/license.get.js deleted file mode 100644 index 75ef90a54..000000000 --- a/api/api/license.get.js +++ /dev/null @@ -1,73 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildLicenseGet (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'local', - 'accept_enterprise' - ] - - const snakeCase = { - acceptEnterprise: 'accept_enterprise' - } - - /** - * Perform a license.get request - * Retrieves licensing information for the cluster - * https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html - */ - return function licenseGet (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_license' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildLicenseGet diff --git a/api/api/license.get_basic_status.js b/api/api/license.get_basic_status.js deleted file mode 100644 index 2d8fdde52..000000000 --- a/api/api/license.get_basic_status.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildLicenseGetBasicStatus (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a license.get_basic_status request - * Retrieves information about the status of the basic license. 
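licenseGet above maps the camelCase acceptEnterprise option onto the accept_enterprise querystring key via the same snakeCaseKeys mechanism every one of these builders uses. Sketch:

client.license.get({ acceptEnterprise: true }, (err, result) => {
  if (err) return console.error(err)
  console.log(result.body.license.type) // e.g. 'basic', 'trial', 'platinum'
})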
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html - */ - return function licenseGetBasicStatus (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_license' + '/' + 'basic_status' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildLicenseGetBasicStatus diff --git a/api/api/license.get_trial_status.js b/api/api/license.get_trial_status.js deleted file mode 100644 index e7b16218c..000000000 --- a/api/api/license.get_trial_status.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildLicenseGetTrialStatus (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a license.get_trial_status request - * Retrieves information about the status of the trial license. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html - */ - return function licenseGetTrialStatus (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_license' + '/' + 'trial_status' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildLicenseGetTrialStatus diff --git a/api/api/license.post.js b/api/api/license.post.js deleted file mode 100644 index d4463f5a8..000000000 --- a/api/api/license.post.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildLicensePost (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'acknowledge' - ] - - const snakeCase = { - - } - - /** - * Perform a license.post request - * Updates the license for the cluster. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html - */ - return function licensePost (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_license' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildLicensePost diff --git a/api/api/license.post_start_basic.js b/api/api/license.post_start_basic.js deleted file mode 100644 index 7b9a1dec1..000000000 --- a/api/api/license.post_start_basic.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildLicensePostStartBasic (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'acknowledge' - ] - - const snakeCase = { - - } - - /** - * Perform a license.post_start_basic request - * Starts an indefinite basic license. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html - */ - return function licensePostStartBasic (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_license' + '/' + 'start_basic' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildLicensePostStartBasic diff --git a/api/api/license.post_start_trial.js b/api/api/license.post_start_trial.js deleted file mode 100644 index 08e876df2..000000000 --- a/api/api/license.post_start_trial.js +++ /dev/null @@ -1,73 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildLicensePostStartTrial (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'type', - 'acknowledge' - ] - - const snakeCase = { - - } - - /** - * Perform a license.post_start_trial request - * starts a limited time trial license. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html - */ - return function licensePostStartTrial (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_license' + '/' + 'start_trial' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildLicensePostStartTrial diff --git a/api/api/mget.js b/api/api/mget.js deleted file mode 100644 index bd5512cc6..000000000 --- a/api/api/mget.js +++ /dev/null @@ -1,103 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMget (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'stored_fields', - 'preference', - 'realtime', - 'refresh', - 'routing', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - storedFields: 'stored_fields', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a mget request - * Allows to get multiple documents in one request. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html - */ - return function mget (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_mget' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_mget' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMget diff --git a/api/api/migration.deprecations.js b/api/api/migration.deprecations.js deleted file mode 100644 index 1a0efca36..000000000 --- a/api/api/migration.deprecations.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
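The mget builder above requires a body and targets GET/POST /_mget or /{index}/_mget depending on whether an index is given; the ids here are hypothetical:

client.mget({
  index: 'logs',
  body: { ids: ['1', '2'] } // shorthand for docs entries sharing the same index
}, (err, result) => {
  if (err) return console.error(err)
  console.log(result.body.docs)
})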
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMigrationDeprecations (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a migration.deprecations request - * Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html - */ - return function migrationDeprecations (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_migration' + '/' + 'deprecations' - } else { - if (method == null) method = 'GET' - path = '/' + '_migration' + '/' + 'deprecations' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMigrationDeprecations diff --git a/api/api/ml.close_job.js b/api/api/ml.close_job.js deleted file mode 100644 index fb853a188..000000000 --- a/api/api/ml.close_job.js +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlCloseJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_jobs', - 'force', - 'timeout' - ] - - const snakeCase = { - allowNoJobs: 'allow_no_jobs' - - } - - /** - * Perform a ml.close_job request - * Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html - */ - return function mlCloseJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_close' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlCloseJob diff --git a/api/api/ml.delete_calendar.js b/api/api/ml.delete_calendar.js deleted file mode 100644 index 7ce88dbac..000000000 --- a/api/api/ml.delete_calendar.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteCalendar (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.delete_calendar request - * Deletes a calendar. 
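mlCloseJob above accepts the job id as either jobId or job_id — the generated builders destructure both spellings and use whichever is present when building the path. Sketch (job id hypothetical):

client.ml.closeJob({ jobId: 'my-anomaly-job', force: true }, (err, result) => {
  if (err) return console.error(err)
})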
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html - */ - return function mlDeleteCalendar (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['calendar_id'] == null && params['calendarId'] == null) { - const err = new ConfigurationError('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, calendarId, calendar_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteCalendar diff --git a/api/api/ml.delete_calendar_event.js b/api/api/ml.delete_calendar_event.js deleted file mode 100644 index 027c8ebaf..000000000 --- a/api/api/ml.delete_calendar_event.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteCalendarEvent (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.delete_calendar_event request - * Deletes scheduled events from a calendar. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-event.html - */ - return function mlDeleteCalendarEvent (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['calendar_id'] == null && params['calendarId'] == null) { - const err = new ConfigurationError('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - if (params['event_id'] == null && params['eventId'] == null) { - const err = new ConfigurationError('Missing required parameter: event_id or eventId') - return handleError(err, callback) - } - - // check required url components - if ((params['event_id'] != null || params['eventId'] != null) && ((params['calendar_id'] == null && params['calendarId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: calendar_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, calendarId, calendar_id, eventId, event_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'events' + '/' + encodeURIComponent(event_id || eventId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteCalendarEvent diff --git a/api/api/ml.delete_calendar_job.js b/api/api/ml.delete_calendar_job.js deleted file mode 100644 index cd9cee576..000000000 --- a/api/api/ml.delete_calendar_job.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteCalendarJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.delete_calendar_job request - * Deletes anomaly detection jobs from a calendar. 
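mlDeleteCalendarEvent above also enforces a "required url component": event_id is only usable when calendar_id is present, since both are path segments of DELETE /_ml/calendars/{calendar_id}/events/{event_id}. Sketch (ids hypothetical):

client.ml.deleteCalendarEvent({ calendarId: 'holidays', eventId: 'event_1' }, (err, result) => {
  if (err) return console.error(err)
})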
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar-job.html - */ - return function mlDeleteCalendarJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['calendar_id'] == null && params['calendarId'] == null) { - const err = new ConfigurationError('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if ((params['job_id'] != null || params['jobId'] != null) && ((params['calendar_id'] == null && params['calendarId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: calendar_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, calendarId, calendar_id, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'jobs' + '/' + encodeURIComponent(job_id || jobId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteCalendarJob diff --git a/api/api/ml.delete_data_frame_analytics.js b/api/api/ml.delete_data_frame_analytics.js deleted file mode 100644 index a067f15ba..000000000 --- a/api/api/ml.delete_data_frame_analytics.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteDataFrameAnalytics (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'force' - ] - - const snakeCase = { - - } - - /** - * Perform a ml.delete_data_frame_analytics request - * Deletes an existing data frame analytics job. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-dfanalytics.html - */ - return function mlDeleteDataFrameAnalytics (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteDataFrameAnalytics diff --git a/api/api/ml.delete_datafeed.js b/api/api/ml.delete_datafeed.js deleted file mode 100644 index ecf9a4f92..000000000 --- a/api/api/ml.delete_datafeed.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteDatafeed (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'force' - ] - - const snakeCase = { - - } - - /** - * Perform a ml.delete_datafeed request - * Deletes an existing datafeed. 
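- * A sketch of a deletion that tolerates a missing datafeed (hypothetical id;
- * the numeric `ignore` is normalized to `[404]` by the code below):
- *
- *   client.ml.deleteDatafeed(
- *     { datafeedId: 'datafeed-sensor-data', force: true },
- *     { ignore: 404 },
- *     (err, result) => { if (err) console.error(err) }
- *   )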
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html - */ - return function mlDeleteDatafeed (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['datafeed_id'] == null && params['datafeedId'] == null) { - const err = new ConfigurationError('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteDatafeed diff --git a/api/api/ml.delete_expired_data.js b/api/api/ml.delete_expired_data.js deleted file mode 100644 index a9fa3368d..000000000 --- a/api/api/ml.delete_expired_data.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteExpiredData (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.delete_expired_data request - * Deletes expired and unused machine learning data. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-expired-data.html - */ - return function mlDeleteExpiredData (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + '_delete_expired_data' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteExpiredData diff --git a/api/api/ml.delete_filter.js b/api/api/ml.delete_filter.js deleted file mode 100644 index 439346fcd..000000000 --- a/api/api/ml.delete_filter.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteFilter (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.delete_filter request - * Deletes a filter. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html - */ - return function mlDeleteFilter (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['filter_id'] == null && params['filterId'] == null) { - const err = new ConfigurationError('Missing required parameter: filter_id or filterId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, filterId, filter_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'filters' + '/' + encodeURIComponent(filter_id || filterId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteFilter diff --git a/api/api/ml.delete_forecast.js b/api/api/ml.delete_forecast.js deleted file mode 100644 index 4f2e7f46f..000000000 --- a/api/api/ml.delete_forecast.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteForecast (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_forecasts', - 'timeout' - ] - - const snakeCase = { - allowNoForecasts: 'allow_no_forecasts' - - } - - /** - * Perform a ml.delete_forecast request - * Deletes forecasts from a machine learning job. 
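- * Since `forecast_id` is optional, the wrapper below builds one of two paths;
- * a sketch with hypothetical ids:
- *
- *   // DELETE /_ml/anomaly_detectors/job-1/_forecast       (all forecasts)
- *   client.ml.deleteForecast({ jobId: 'job-1' }, callback)
- *
- *   // DELETE /_ml/anomaly_detectors/job-1/_forecast/fc-1  (one forecast)
- *   client.ml.deleteForecast({ jobId: 'job-1', forecastId: 'fc-1' }, callback)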
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html - */ - return function mlDeleteForecast (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if ((params['forecast_id'] != null || params['forecastId'] != null) && ((params['job_id'] == null && params['jobId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, forecastId, forecast_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((job_id || jobId) != null && (forecast_id || forecastId) != null) { - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_forecast' + '/' + encodeURIComponent(forecast_id || forecastId) - } else { - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_forecast' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteForecast diff --git a/api/api/ml.delete_job.js b/api/api/ml.delete_job.js deleted file mode 100644 index 31122255c..000000000 --- a/api/api/ml.delete_job.js +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'force', - 'wait_for_completion' - ] - - const snakeCase = { - waitForCompletion: 'wait_for_completion' - } - - /** - * Perform a ml.delete_job request - * Deletes an existing anomaly detection job. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html - */ - return function mlDeleteJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteJob diff --git a/api/api/ml.delete_model_snapshot.js b/api/api/ml.delete_model_snapshot.js deleted file mode 100644 index 5768d1d6a..000000000 --- a/api/api/ml.delete_model_snapshot.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteModelSnapshot (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.delete_model_snapshot request - * Deletes an existing model snapshot. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html - */ - return function mlDeleteModelSnapshot (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params['snapshot_id'] == null && params['snapshotId'] == null) { - const err = new ConfigurationError('Missing required parameter: snapshot_id or snapshotId') - return handleError(err, callback) - } - - // check required url components - if ((params['snapshot_id'] != null || params['snapshotId'] != null) && ((params['job_id'] == null && params['jobId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, snapshotId, snapshot_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' + '/' + encodeURIComponent(snapshot_id || snapshotId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteModelSnapshot diff --git a/api/api/ml.delete_trained_model.js b/api/api/ml.delete_trained_model.js deleted file mode 100644 index 0aacc49ba..000000000 --- a/api/api/ml.delete_trained_model.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlDeleteTrainedModel (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.delete_trained_model request - * Deletes an existing trained inference model that is currently not referenced by an ingest pipeline. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-inference.html - */ - return function mlDeleteTrainedModel (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['model_id'] == null && params['modelId'] == null) { - const err = new ConfigurationError('Missing required parameter: model_id or modelId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_ml' + '/' + 'inference' + '/' + encodeURIComponent(model_id || modelId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlDeleteTrainedModel diff --git a/api/api/ml.estimate_model_memory.js b/api/api/ml.estimate_model_memory.js deleted file mode 100644 index 4287cfec6..000000000 --- a/api/api/ml.estimate_model_memory.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlEstimateModelMemory (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.estimate_model_memory request - * Estimates the model memory - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html - */ - return function mlEstimateModelMemory (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + '_estimate_model_memory' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlEstimateModelMemory diff --git a/api/api/ml.evaluate_data_frame.js b/api/api/ml.evaluate_data_frame.js deleted file mode 100644 index 24ca8e3d2..000000000 --- a/api/api/ml.evaluate_data_frame.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlEvaluateDataFrame (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.evaluate_data_frame request - * Evaluates the data frame analytics for an annotated index. 
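- * The body is a required parameter here; a sketch with illustrative index
- * and field names (not taken from this file):
- *
- *   client.ml.evaluateDataFrame({
- *     body: {
- *       index: 'my-annotated-index',
- *       evaluation: {
- *         outlier_detection: {
- *           actual_field: 'is_outlier',
- *           predicted_probability_field: 'ml.outlier_score'
- *         }
- *       }
- *     }
- *   }, (err, result) => { if (err) console.error(err) })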
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanalytics.html - */ - return function mlEvaluateDataFrame (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + '_evaluate' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlEvaluateDataFrame diff --git a/api/api/ml.explain_data_frame_analytics.js b/api/api/ml.explain_data_frame_analytics.js deleted file mode 100644 index dec8dcf79..000000000 --- a/api/api/ml.explain_data_frame_analytics.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlExplainDataFrameAnalytics (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.explain_data_frame_analytics request - * Explains a data frame analytics config. - * http://www.elastic.co/guide/en/elasticsearch/reference/current/explain-dfanalytics.html - */ - return function mlExplainDataFrameAnalytics (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_explain' - } else { - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + '_explain' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlExplainDataFrameAnalytics diff --git a/api/api/ml.find_file_structure.js b/api/api/ml.find_file_structure.js deleted file mode 100644 index e8002c15d..000000000 --- a/api/api/ml.find_file_structure.js +++ /dev/null @@ -1,99 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlFindFileStructure (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'lines_to_sample', - 'line_merge_size_limit', - 'timeout', - 'charset', - 'format', - 'has_header_row', - 'column_names', - 'delimiter', - 'quote', - 'should_trim_fields', - 'grok_pattern', - 'timestamp_field', - 'timestamp_format', - 'explain' - ] - - const snakeCase = { - linesToSample: 'lines_to_sample', - lineMergeSizeLimit: 'line_merge_size_limit', - hasHeaderRow: 'has_header_row', - columnNames: 'column_names', - shouldTrimFields: 'should_trim_fields', - grokPattern: 'grok_pattern', - timestampField: 'timestamp_field', - timestampFormat: 'timestamp_format' - - } - - /** - * Perform a ml.find_file_structure request - * Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-find-file-structure.html - */ - return function mlFindFileStructure (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'find_file_structure' - - // build request object - const request = { - method, - path, - bulkBody: body, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlFindFileStructure diff --git a/api/api/ml.flush_job.js b/api/api/ml.flush_job.js deleted file mode 100644 index 4ca50f6bd..000000000 --- a/api/api/ml.flush_job.js +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. 
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlFlushJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'calc_interim', - 'start', - 'end', - 'advance_time', - 'skip_time' - ] - - const snakeCase = { - calcInterim: 'calc_interim', - advanceTime: 'advance_time', - skipTime: 'skip_time' - } - - /** - * Perform a ml.flush_job request - * Forces any buffered data to be processed by the job. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html - */ - return function mlFlushJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_flush' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlFlushJob diff --git a/api/api/ml.forecast.js b/api/api/ml.forecast.js deleted file mode 100644 index b1af60455..000000000 --- a/api/api/ml.forecast.js +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlForecast (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'duration', - 'expires_in', - 'max_model_memory' - ] - - const snakeCase = { - expiresIn: 'expires_in', - maxModelMemory: 'max_model_memory' - } - - /** - * Perform a ml.forecast request - * Predicts the future behavior of a time series by using its historical behavior. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-forecast.html - */ - return function mlForecast (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_forecast' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlForecast diff --git a/api/api/ml.get_buckets.js b/api/api/ml.get_buckets.js deleted file mode 100644 index 3e7585a5f..000000000 --- a/api/api/ml.get_buckets.js +++ /dev/null @@ -1,99 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetBuckets (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'expand', - 'exclude_interim', - 'from', - 'size', - 'start', - 'end', - 'anomaly_score', - 'sort', - 'desc' - ] - - const snakeCase = { - excludeInterim: 'exclude_interim', - anomalyScore: 'anomaly_score' - - } - - /** - * Perform a ml.get_buckets request - * Retrieves anomaly detection job results for one or more buckets. 
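- * The verb depends on whether a body is supplied, and camelCase keys are
- * snake-cased into the querystring; a sketch with a hypothetical job id:
- *
- *   // GET /_ml/anomaly_detectors/job-1/results/buckets?anomaly_score=75
- *   client.ml.getBuckets({ jobId: 'job-1', anomalyScore: 75 }, callback)
- *
- *   // POST, with the same filter moved into the request body
- *   client.ml.getBuckets({ jobId: 'job-1', body: { anomaly_score: 75 } }, callback)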
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html - */ - return function mlGetBuckets (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if (params['timestamp'] != null && ((params['job_id'] == null && params['jobId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, timestamp, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((job_id || jobId) != null && (timestamp) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'buckets' + '/' + encodeURIComponent(timestamp) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'buckets' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetBuckets diff --git a/api/api/ml.get_calendar_events.js b/api/api/ml.get_calendar_events.js deleted file mode 100644 index 6bb299b77..000000000 --- a/api/api/ml.get_calendar_events.js +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetCalendarEvents (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'job_id', - 'start', - 'end', - 'from', - 'size' - ] - - const snakeCase = { - jobId: 'job_id' - - } - - /** - * Perform a ml.get_calendar_events request - * Retrieves information about the scheduled events in calendars. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html - */ - return function mlGetCalendarEvents (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['calendar_id'] == null && params['calendarId'] == null) { - const err = new ConfigurationError('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, calendarId, calendar_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'events' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetCalendarEvents diff --git a/api/api/ml.get_calendars.js b/api/api/ml.get_calendars.js deleted file mode 100644 index 53440e052..000000000 --- a/api/api/ml.get_calendars.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetCalendars (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'from', - 'size' - ] - - const snakeCase = { - - } - - /** - * Perform a ml.get_calendars request - * Retrieves configuration information for calendars. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html - */ - return function mlGetCalendars (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, calendarId, calendar_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((calendar_id || calendarId) != null) { - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'calendars' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetCalendars diff --git a/api/api/ml.get_categories.js b/api/api/ml.get_categories.js deleted file mode 100644 index 68250b7c9..000000000 --- a/api/api/ml.get_categories.js +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetCategories (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'from', - 'size' - ] - - const snakeCase = { - - } - - /** - * Perform a ml.get_categories request - * Retrieves anomaly detection job results for one or more categories. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html - */ - return function mlGetCategories (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if ((params['category_id'] != null || params['categoryId'] != null) && ((params['job_id'] == null && params['jobId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, categoryId, category_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((job_id || jobId) != null && (category_id || categoryId) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'categories' + '/' + encodeURIComponent(category_id || categoryId) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'categories' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetCategories diff --git a/api/api/ml.get_data_frame_analytics.js b/api/api/ml.get_data_frame_analytics.js deleted file mode 100644 index 0d3d99008..000000000 --- a/api/api/ml.get_data_frame_analytics.js +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetDataFrameAnalytics (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_match', - 'from', - 'size' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - - } - - /** - * Perform a ml.get_data_frame_analytics request - * Retrieves configuration information for data frame analytics jobs. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.html - */ - return function mlGetDataFrameAnalytics (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetDataFrameAnalytics diff --git a/api/api/ml.get_data_frame_analytics_stats.js b/api/api/ml.get_data_frame_analytics_stats.js deleted file mode 100644 index 82f4d71d5..000000000 --- a/api/api/ml.get_data_frame_analytics_stats.js +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetDataFrameAnalyticsStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_match', - 'from', - 'size' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - - } - - /** - * Perform a ml.get_data_frame_analytics_stats request - * Retrieves usage information for data frame analytics jobs. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html - */ - return function mlGetDataFrameAnalyticsStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetDataFrameAnalyticsStats diff --git a/api/api/ml.get_datafeed_stats.js b/api/api/ml.get_datafeed_stats.js deleted file mode 100644 index 9935f16f4..000000000 --- a/api/api/ml.get_datafeed_stats.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetDatafeedStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_datafeeds' - ] - - const snakeCase = { - allowNoDatafeeds: 'allow_no_datafeeds' - } - - /** - * Perform a ml.get_datafeed_stats request - * Retrieves usage information for datafeeds. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html - */ - return function mlGetDatafeedStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((datafeed_id || datafeedId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetDatafeedStats diff --git a/api/api/ml.get_datafeeds.js b/api/api/ml.get_datafeeds.js deleted file mode 100644 index 66b0cde5c..000000000 --- a/api/api/ml.get_datafeeds.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetDatafeeds (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_datafeeds' - ] - - const snakeCase = { - allowNoDatafeeds: 'allow_no_datafeeds' - } - - /** - * Perform a ml.get_datafeeds request - * Retrieves configuration information for datafeeds. 
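- * Assuming the 7.x transport, omitting the callback returns a promise, so
- * the call can also be awaited:
- *
- *   const { body } = await client.ml.getDatafeeds({ allowNoDatafeeds: true })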
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html - */ - return function mlGetDatafeeds (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((datafeed_id || datafeedId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'datafeeds' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetDatafeeds diff --git a/api/api/ml.get_filters.js b/api/api/ml.get_filters.js deleted file mode 100644 index 0d5c36973..000000000 --- a/api/api/ml.get_filters.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetFilters (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'from', - 'size' - ] - - const snakeCase = { - - } - - /** - * Perform a ml.get_filters request - * Retrieves filters. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html - */ - return function mlGetFilters (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, filterId, filter_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((filter_id || filterId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'filters' + '/' + encodeURIComponent(filter_id || filterId) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'filters' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetFilters diff --git a/api/api/ml.get_influencers.js b/api/api/ml.get_influencers.js deleted file mode 100644 index 91d8269bd..000000000 --- a/api/api/ml.get_influencers.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetInfluencers (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'exclude_interim', - 'from', - 'size', - 'start', - 'end', - 'influencer_score', - 'sort', - 'desc' - ] - - const snakeCase = { - excludeInterim: 'exclude_interim', - influencerScore: 'influencer_score' - - } - - /** - * Perform a ml.get_influencers request - * Retrieves anomaly detection job results for one or more influencers. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html - */ - return function mlGetInfluencers (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'influencers' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetInfluencers diff --git a/api/api/ml.get_job_stats.js b/api/api/ml.get_job_stats.js deleted file mode 100644 index 6827cff93..000000000 --- a/api/api/ml.get_job_stats.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetJobStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_jobs' - ] - - const snakeCase = { - allowNoJobs: 'allow_no_jobs' - } - - /** - * Perform a ml.get_job_stats request - * Retrieves usage information for anomaly detection jobs. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html - */ - return function mlGetJobStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((job_id || jobId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetJobStats diff --git a/api/api/ml.get_jobs.js b/api/api/ml.get_jobs.js deleted file mode 100644 index 30c78f4e5..000000000 --- a/api/api/ml.get_jobs.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetJobs (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_jobs' - ] - - const snakeCase = { - allowNoJobs: 'allow_no_jobs' - } - - /** - * Perform a ml.get_jobs request - * Retrieves configuration information for anomaly detection jobs. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html - */ - return function mlGetJobs (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((job_id || jobId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'anomaly_detectors' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetJobs diff --git a/api/api/ml.get_model_snapshots.js b/api/api/ml.get_model_snapshots.js deleted file mode 100644 index 26e1e1574..000000000 --- a/api/api/ml.get_model_snapshots.js +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetModelSnapshots (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'from', - 'size', - 'start', - 'end', - 'sort', - 'desc' - ] - - const snakeCase = { - - } - - /** - * Perform a ml.get_model_snapshots request - * Retrieves information about model snapshots. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html - */ - return function mlGetModelSnapshots (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if ((params['snapshot_id'] != null || params['snapshotId'] != null) && ((params['job_id'] == null && params['jobId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, snapshotId, snapshot_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((job_id || jobId) != null && (snapshot_id || snapshotId) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' + '/' + encodeURIComponent(snapshot_id || snapshotId) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetModelSnapshots diff --git a/api/api/ml.get_overall_buckets.js b/api/api/ml.get_overall_buckets.js deleted file mode 100644 index a22f47ddf..000000000 --- a/api/api/ml.get_overall_buckets.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetOverallBuckets (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'top_n', - 'bucket_span', - 'overall_score', - 'exclude_interim', - 'start', - 'end', - 'allow_no_jobs' - ] - - const snakeCase = { - topN: 'top_n', - bucketSpan: 'bucket_span', - overallScore: 'overall_score', - excludeInterim: 'exclude_interim', - allowNoJobs: 'allow_no_jobs' - } - - /** - * Perform a ml.get_overall_buckets request - * Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html - */ - return function mlGetOverallBuckets (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'overall_buckets' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetOverallBuckets diff --git a/api/api/ml.get_records.js b/api/api/ml.get_records.js deleted file mode 100644 index 6f160cb24..000000000 --- a/api/api/ml.get_records.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetRecords (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'exclude_interim', - 'from', - 'size', - 'start', - 'end', - 'record_score', - 'sort', - 'desc' - ] - - const snakeCase = { - excludeInterim: 'exclude_interim', - recordScore: 'record_score' - - } - - /** - * Perform a ml.get_records request - * Retrieves anomaly records for an anomaly detection job. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html - */ - return function mlGetRecords (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'results' + '/' + 'records' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetRecords diff --git a/api/api/ml.get_trained_models.js b/api/api/ml.get_trained_models.js deleted file mode 100644 index b447802ed..000000000 --- a/api/api/ml.get_trained_models.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetTrainedModels (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_match', - 'include_model_definition', - 'decompress_definition', - 'from', - 'size', - 'tags', - 'for_export' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match', - includeModelDefinition: 'include_model_definition', - decompressDefinition: 'decompress_definition', - forExport: 'for_export' - } - - /** - * Perform a ml.get_trained_models request - * Retrieves configuration information for a trained inference model. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-inference.html - */ - return function mlGetTrainedModels (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((model_id || modelId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'inference' + '/' + encodeURIComponent(model_id || modelId) - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'inference' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetTrainedModels diff --git a/api/api/ml.get_trained_models_stats.js b/api/api/ml.get_trained_models_stats.js deleted file mode 100644 index a06558cf9..000000000 --- a/api/api/ml.get_trained_models_stats.js +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlGetTrainedModelsStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_match', - 'from', - 'size' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - - } - - /** - * Perform a ml.get_trained_models_stats request - * Retrieves usage information for trained inference models. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-inference-stats.html - */ - return function mlGetTrainedModelsStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((model_id || modelId) != null) { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'inference' + '/' + encodeURIComponent(model_id || modelId) + '/' + '_stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'inference' + '/' + '_stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlGetTrainedModelsStats diff --git a/api/api/ml.info.js b/api/api/ml.info.js deleted file mode 100644 index 67f1c5254..000000000 --- a/api/api/ml.info.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlInfo (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.info request - * Returns defaults and limits used by machine learning. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-ml-info.html - */ - return function mlInfo (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'info' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlInfo diff --git a/api/api/ml.open_job.js b/api/api/ml.open_job.js deleted file mode 100644 index fd7199a7e..000000000 --- a/api/api/ml.open_job.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlOpenJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.open_job request - * Opens one or more anomaly detection jobs. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html - */ - return function mlOpenJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_open' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlOpenJob diff --git a/api/api/ml.post_calendar_events.js b/api/api/ml.post_calendar_events.js deleted file mode 100644 index 1586d6e88..000000000 --- a/api/api/ml.post_calendar_events.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPostCalendarEvents (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.post_calendar_events request - * Posts scheduled events in a calendar. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html - */ - return function mlPostCalendarEvents (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['calendar_id'] == null && params['calendarId'] == null) { - const err = new ConfigurationError('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, calendarId, calendar_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'events' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPostCalendarEvents diff --git a/api/api/ml.post_data.js b/api/api/ml.post_data.js deleted file mode 100644 index ee13a736f..000000000 --- a/api/api/ml.post_data.js +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPostData (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'reset_start', - 'reset_end' - ] - - const snakeCase = { - resetStart: 'reset_start', - resetEnd: 'reset_end' - } - - /** - * Perform a ml.post_data request - * Sends data to an anomaly detection job for analysis. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html - */ - return function mlPostData (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_data' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPostData diff --git a/api/api/ml.preview_datafeed.js b/api/api/ml.preview_datafeed.js deleted file mode 100644 index cd9807027..000000000 --- a/api/api/ml.preview_datafeed.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPreviewDatafeed (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.preview_datafeed request - * Previews a datafeed. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html - */ - return function mlPreviewDatafeed (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['datafeed_id'] == null && params['datafeedId'] == null) { - const err = new ConfigurationError('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_preview' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPreviewDatafeed diff --git a/api/api/ml.put_calendar.js b/api/api/ml.put_calendar.js deleted file mode 100644 index d81be26fe..000000000 --- a/api/api/ml.put_calendar.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPutCalendar (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.put_calendar request - * Instantiates a calendar. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html - */ - return function mlPutCalendar (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['calendar_id'] == null && params['calendarId'] == null) { - const err = new ConfigurationError('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, calendarId, calendar_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPutCalendar diff --git a/api/api/ml.put_calendar_job.js b/api/api/ml.put_calendar_job.js deleted file mode 100644 index b6ff7a902..000000000 --- a/api/api/ml.put_calendar_job.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPutCalendarJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.put_calendar_job request - * Adds an anomaly detection job to a calendar. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html - */ - return function mlPutCalendarJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['calendar_id'] == null && params['calendarId'] == null) { - const err = new ConfigurationError('Missing required parameter: calendar_id or calendarId') - return handleError(err, callback) - } - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - - // check required url components - if ((params['job_id'] != null || params['jobId'] != null) && ((params['calendar_id'] == null && params['calendarId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: calendar_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, calendarId, calendar_id, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'calendars' + '/' + encodeURIComponent(calendar_id || calendarId) + '/' + 'jobs' + '/' + encodeURIComponent(job_id || jobId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPutCalendarJob diff --git a/api/api/ml.put_data_frame_analytics.js b/api/api/ml.put_data_frame_analytics.js deleted file mode 100644 index 4e01baa44..000000000 --- a/api/api/ml.put_data_frame_analytics.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPutDataFrameAnalytics (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.put_data_frame_analytics request - * Instantiates a data frame analytics job. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/put-dfanalytics.html - */ - return function mlPutDataFrameAnalytics (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPutDataFrameAnalytics diff --git a/api/api/ml.put_datafeed.js b/api/api/ml.put_datafeed.js deleted file mode 100644 index 7f87ea306..000000000 --- a/api/api/ml.put_datafeed.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPutDatafeed (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'ignore_throttled', - 'expand_wildcards' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - ignoreThrottled: 'ignore_throttled', - expandWildcards: 'expand_wildcards' - } - - /** - * Perform a ml.put_datafeed request - * Instantiates a datafeed. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html - */ - return function mlPutDatafeed (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['datafeed_id'] == null && params['datafeedId'] == null) { - const err = new ConfigurationError('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPutDatafeed diff --git a/api/api/ml.put_filter.js b/api/api/ml.put_filter.js deleted file mode 100644 index a82fffd70..000000000 --- a/api/api/ml.put_filter.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPutFilter (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.put_filter request - * Instantiates a filter. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html - */ - return function mlPutFilter (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['filter_id'] == null && params['filterId'] == null) { - const err = new ConfigurationError('Missing required parameter: filter_id or filterId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, filterId, filter_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'filters' + '/' + encodeURIComponent(filter_id || filterId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPutFilter diff --git a/api/api/ml.put_job.js b/api/api/ml.put_job.js deleted file mode 100644 index 0ad56a8aa..000000000 --- a/api/api/ml.put_job.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPutJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.put_job request - * Instantiates an anomaly detection job. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html - */ - return function mlPutJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPutJob diff --git a/api/api/ml.put_trained_model.js b/api/api/ml.put_trained_model.js deleted file mode 100644 index 1d460ae35..000000000 --- a/api/api/ml.put_trained_model.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlPutTrainedModel (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.put_trained_model request - * Creates an inference trained model. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/put-inference.html - */ - return function mlPutTrainedModel (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['model_id'] == null && params['modelId'] == null) { - const err = new ConfigurationError('Missing required parameter: model_id or modelId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, modelId, model_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_ml' + '/' + 'inference' + '/' + encodeURIComponent(model_id || modelId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlPutTrainedModel diff --git a/api/api/ml.revert_model_snapshot.js b/api/api/ml.revert_model_snapshot.js deleted file mode 100644 index b5ac7a714..000000000 --- a/api/api/ml.revert_model_snapshot.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlRevertModelSnapshot (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'delete_intervening_results' - ] - - const snakeCase = { - deleteInterveningResults: 'delete_intervening_results' - } - - /** - * Perform a ml.revert_model_snapshot request - * Reverts to a specific snapshot. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html - */ - return function mlRevertModelSnapshot (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params['snapshot_id'] == null && params['snapshotId'] == null) { - const err = new ConfigurationError('Missing required parameter: snapshot_id or snapshotId') - return handleError(err, callback) - } - - // check required url components - if ((params['snapshot_id'] != null || params['snapshotId'] != null) && ((params['job_id'] == null && params['jobId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, snapshotId, snapshot_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' + '/' + encodeURIComponent(snapshot_id || snapshotId) + '/' + '_revert' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlRevertModelSnapshot diff --git a/api/api/ml.set_upgrade_mode.js b/api/api/ml.set_upgrade_mode.js deleted file mode 100644 index 6d5a03f06..000000000 --- a/api/api/ml.set_upgrade_mode.js +++ /dev/null @@ -1,73 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlSetUpgradeMode (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'enabled', - 'timeout' - ] - - const snakeCase = { - - } - - /** - * Perform a ml.set_upgrade_mode request - * Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html - */ - return function mlSetUpgradeMode (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'set_upgrade_mode' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlSetUpgradeMode diff --git a/api/api/ml.start_data_frame_analytics.js b/api/api/ml.start_data_frame_analytics.js deleted file mode 100644 index 7ba5210b3..000000000 --- a/api/api/ml.start_data_frame_analytics.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlStartDataFrameAnalytics (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout' - ] - - const snakeCase = { - - } - - /** - * Perform a ml.start_data_frame_analytics request - * Starts a data frame analytics job. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytics.html - */ - return function mlStartDataFrameAnalytics (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlStartDataFrameAnalytics diff --git a/api/api/ml.start_datafeed.js b/api/api/ml.start_datafeed.js deleted file mode 100644 index 45725dfbf..000000000 --- a/api/api/ml.start_datafeed.js +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlStartDatafeed (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'start', - 'end', - 'timeout' - ] - - const snakeCase = { - - } - - /** - * Perform a ml.start_datafeed request - * Starts one or more datafeeds. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html - */ - return function mlStartDatafeed (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['datafeed_id'] == null && params['datafeedId'] == null) { - const err = new ConfigurationError('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlStartDatafeed diff --git a/api/api/ml.stop_data_frame_analytics.js b/api/api/ml.stop_data_frame_analytics.js deleted file mode 100644 index f549389e2..000000000 --- a/api/api/ml.stop_data_frame_analytics.js +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlStopDataFrameAnalytics (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_match', - 'force', - 'timeout' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - - } - - /** - * Perform a ml.stop_data_frame_analytics request - * Stops one or more data frame analytics jobs. 
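[Review aid: a hedged sketch of the removed ml.stop_data_frame_analytics builder; the job id and option values are assumptions. Omitting `id` would trigger the ConfigurationError path above.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.ml.stopDataFrameAnalytics(
  { id: 'my-analytics-job', force: false, timeout: '20s' }, // illustrative values
  (err, result) => {
    if (err) console.error(err)
  }
)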
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics.html - */ - return function mlStopDataFrameAnalytics (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'data_frame' + '/' + 'analytics' + '/' + encodeURIComponent(id) + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlStopDataFrameAnalytics diff --git a/api/api/ml.stop_datafeed.js b/api/api/ml.stop_datafeed.js deleted file mode 100644 index a10f9a926..000000000 --- a/api/api/ml.stop_datafeed.js +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlStopDatafeed (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'allow_no_datafeeds', - 'force', - 'timeout' - ] - - const snakeCase = { - allowNoDatafeeds: 'allow_no_datafeeds' - - } - - /** - * Perform a ml.stop_datafeed request - * Stops one or more datafeeds. 
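[Review aid: the parameter checks above accept either snake_case or camelCase names; a minimal sketch under assumed setup, with an illustrative datafeed id.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

// datafeedId and datafeed_id are interchangeable, per the checks above.
client.ml.stopDatafeed({ datafeedId: 'datafeed-my-job', force: true }, (err, result) => {
  if (err) console.error(err)
})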
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html - */ - return function mlStopDatafeed (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['datafeed_id'] == null && params['datafeedId'] == null) { - const err = new ConfigurationError('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlStopDatafeed diff --git a/api/api/ml.update_datafeed.js b/api/api/ml.update_datafeed.js deleted file mode 100644 index 253b89cc7..000000000 --- a/api/api/ml.update_datafeed.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlUpdateDatafeed (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'ignore_throttled', - 'expand_wildcards' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - ignoreThrottled: 'ignore_throttled', - expandWildcards: 'expand_wildcards' - } - - /** - * Perform a ml.update_datafeed request - * Updates certain properties of a datafeed. 
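[Review aid: this builder requires both a datafeed id and a body; a sketch with an assumed updatable property (the field name is illustrative, not taken from the diff).]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.ml.updateDatafeed({
  datafeed_id: 'datafeed-my-job',
  body: { scroll_size: 1000 } // assumed updatable datafeed property
}, (err, result) => {
  if (err) console.error(err)
})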
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html - */ - return function mlUpdateDatafeed (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['datafeed_id'] == null && params['datafeedId'] == null) { - const err = new ConfigurationError('Missing required parameter: datafeed_id or datafeedId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, datafeedId, datafeed_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'datafeeds' + '/' + encodeURIComponent(datafeed_id || datafeedId) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlUpdateDatafeed diff --git a/api/api/ml.update_filter.js b/api/api/ml.update_filter.js deleted file mode 100644 index 38296576e..000000000 --- a/api/api/ml.update_filter.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlUpdateFilter (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.update_filter request - * Updates the description of a filter, adds items, or removes items. 
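[Review aid: a hedged sketch of the removed ml.update_filter builder; the filter id and item values are assumptions.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.ml.updateFilter({
  filter_id: 'safe_domains',           // assumed filter id
  body: { add_items: ['example.com'] } // add items, remove items, or re-describe
}, (err, result) => {
  if (err) console.error(err)
})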
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html - */ - return function mlUpdateFilter (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['filter_id'] == null && params['filterId'] == null) { - const err = new ConfigurationError('Missing required parameter: filter_id or filterId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, filterId, filter_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'filters' + '/' + encodeURIComponent(filter_id || filterId) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlUpdateFilter diff --git a/api/api/ml.update_job.js b/api/api/ml.update_job.js deleted file mode 100644 index 3f2b52e88..000000000 --- a/api/api/ml.update_job.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlUpdateJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.update_job request - * Updates certain properties of an anomaly detection job. 
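[Review aid: a hedged sketch of the removed ml.update_job builder; both job_id and body are validated as required above, and the values here are illustrative.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.ml.updateJob({
  job_id: 'my-anomaly-job',
  body: { description: 'Revised job description' } // illustrative update
}, (err, result) => {
  if (err) console.error(err)
})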
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html - */ - return function mlUpdateJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlUpdateJob diff --git a/api/api/ml.update_model_snapshot.js b/api/api/ml.update_model_snapshot.js deleted file mode 100644 index 5890350a9..000000000 --- a/api/api/ml.update_model_snapshot.js +++ /dev/null @@ -1,92 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlUpdateModelSnapshot (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.update_model_snapshot request - * Updates certain properties of a snapshot. 
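[Review aid: this builder validates job_id, snapshot_id, and body, and additionally checks the url component dependency above; a sketch with assumed ids.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.ml.updateModelSnapshot({
  job_id: 'my-anomaly-job',
  snapshot_id: '1575402237', // assumed snapshot id
  body: { description: 'Snapshot taken before the upgrade' }
}, (err, result) => {
  if (err) console.error(err)
})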
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html - */ - return function mlUpdateModelSnapshot (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['job_id'] == null && params['jobId'] == null) { - const err = new ConfigurationError('Missing required parameter: job_id or jobId') - return handleError(err, callback) - } - if (params['snapshot_id'] == null && params['snapshotId'] == null) { - const err = new ConfigurationError('Missing required parameter: snapshot_id or snapshotId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // check required url components - if ((params['snapshot_id'] != null || params['snapshotId'] != null) && ((params['job_id'] == null && params['jobId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: job_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, jobId, job_id, snapshotId, snapshot_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + encodeURIComponent(job_id || jobId) + '/' + 'model_snapshots' + '/' + encodeURIComponent(snapshot_id || snapshotId) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlUpdateModelSnapshot diff --git a/api/api/ml.validate.js b/api/api/ml.validate.js deleted file mode 100644 index 6a20fc89b..000000000 --- a/api/api/ml.validate.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlValidate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.validate request - * Validates an anomaly detection job. 
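[Review aid: ml.validate takes only a body, which is a candidate anomaly detection job configuration; the config below is a plausible minimal shape, assumed rather than taken from the diff.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.ml.validate({
  body: {
    analysis_config: {
      bucket_span: '15m',
      detectors: [{ function: 'mean', field_name: 'responsetime' }]
    },
    data_description: { time_field: 'timestamp' }
  }
}, (err, result) => {
  if (err) console.error(err)
})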
- * https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html - */ - return function mlValidate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + '_validate' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlValidate diff --git a/api/api/ml.validate_detector.js b/api/api/ml.validate_detector.js deleted file mode 100644 index 38549430e..000000000 --- a/api/api/ml.validate_detector.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMlValidateDetector (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ml.validate_detector request - * Validates an anomaly detection detector. 
- * https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html - */ - return function mlValidateDetector (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_ml' + '/' + 'anomaly_detectors' + '/' + '_validate' + '/' + 'detector' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMlValidateDetector diff --git a/api/api/monitoring.bulk.js b/api/api/monitoring.bulk.js deleted file mode 100644 index 366f72155..000000000 --- a/api/api/monitoring.bulk.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMonitoringBulk (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'system_id', - 'system_api_version', - 'interval' - ] - - const snakeCase = { - systemId: 'system_id', - systemApiVersion: 'system_api_version' - - } - - /** - * Perform a monitoring.bulk request - * Used by the monitoring features to send monitoring data. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/monitor-elasticsearch-cluster.html - */ - return function monitoringBulk (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((type) != null) { - if (method == null) method = 'POST' - path = '/' + '_monitoring' + '/' + encodeURIComponent(type) + '/' + 'bulk' - } else { - if (method == null) method = 'POST' - path = '/' + '_monitoring' + '/' + 'bulk' - } - - // build request object - const request = { - method, - path, - bulkBody: body, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMonitoringBulk diff --git a/api/api/msearch.js b/api/api/msearch.js deleted file mode 100644 index 2307e9e11..000000000 --- a/api/api/msearch.js +++ /dev/null @@ -1,102 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMsearch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'search_type', - 'max_concurrent_searches', - 'typed_keys', - 'pre_filter_shard_size', - 'max_concurrent_shard_requests', - 'rest_total_hits_as_int', - 'ccs_minimize_roundtrips', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - searchType: 'search_type', - maxConcurrentSearches: 'max_concurrent_searches', - typedKeys: 'typed_keys', - preFilterShardSize: 'pre_filter_shard_size', - maxConcurrentShardRequests: 'max_concurrent_shard_requests', - restTotalHitsAsInt: 'rest_total_hits_as_int', - ccsMinimizeRoundtrips: 'ccs_minimize_roundtrips', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a msearch request - * Allows to execute several search operations in one request. 
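[Review aid: unlike most builders here, msearch sends the payload through `bulkBody`, so the client serializes an array of alternating header and query objects as newline-delimited JSON. A sketch under assumed setup; the index names are illustrative.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.msearch({
  body: [
    { index: 'logs-a' },                    // header line
    { query: { match_all: {} } },           // body line
    { index: 'logs-b' },                    // assumed index names
    { query: { match: { level: 'error' } } }
  ]
}, (err, result) => {
  if (err) console.error(err)
  else console.log(result.body.responses.length) // one response per header/body pair
})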
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html - */ - return function msearch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_msearch' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_msearch' - } - - // build request object - const request = { - method, - path, - bulkBody: body, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMsearch diff --git a/api/api/msearch_template.js b/api/api/msearch_template.js deleted file mode 100644 index 884ca4d3f..000000000 --- a/api/api/msearch_template.js +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMsearchTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'search_type', - 'typed_keys', - 'max_concurrent_searches', - 'rest_total_hits_as_int', - 'ccs_minimize_roundtrips', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - searchType: 'search_type', - typedKeys: 'typed_keys', - maxConcurrentSearches: 'max_concurrent_searches', - restTotalHitsAsInt: 'rest_total_hits_as_int', - ccsMinimizeRoundtrips: 'ccs_minimize_roundtrips', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a msearch_template request - * Allows to execute several search template operations in one request. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html - */ - return function msearchTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_msearch' + '/' + 'template' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_msearch' + '/' + 'template' - } - - // build request object - const request = { - method, - path, - bulkBody: body, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMsearchTemplate diff --git a/api/api/mtermvectors.js b/api/api/mtermvectors.js deleted file mode 100644 index ba5ecff69..000000000 --- a/api/api/mtermvectors.js +++ /dev/null @@ -1,97 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildMtermvectors (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ids', - 'term_statistics', - 'field_statistics', - 'fields', - 'offsets', - 'positions', - 'payloads', - 'preference', - 'routing', - 'realtime', - 'version', - 'version_type', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - termStatistics: 'term_statistics', - fieldStatistics: 'field_statistics', - versionType: 'version_type', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a mtermvectors request - * Returns multiple termvectors in one request. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html - */ - return function mtermvectors (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_mtermvectors' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_mtermvectors' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildMtermvectors diff --git a/api/api/nodes.hot_threads.js b/api/api/nodes.hot_threads.js deleted file mode 100644 index 82bafc0c2..000000000 --- a/api/api/nodes.hot_threads.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildNodesHotThreads (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'interval', - 'snapshots', - 'threads', - 'ignore_idle_threads', - 'type', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreIdleThreads: 'ignore_idle_threads', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a nodes.hot_threads request - * Returns information about hot threads on each node in the cluster. 
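[Review aid: a sketch for the removed nodes.hot_threads builder; note the request object above always carries `body: null`, and the endpoint responds with plain text rather than JSON. Option values are illustrative.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.nodes.hotThreads({ threads: 3, ignore_idle_threads: true }, (err, result) => {
  if (err) console.error(err)
  else console.log(result.body) // plain-text thread dump
})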
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html - */ - return function nodesHotThreads (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, nodeId, node_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((node_id || nodeId) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'hot_threads' - } else { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + 'hot_threads' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildNodesHotThreads diff --git a/api/api/nodes.info.js b/api/api/nodes.info.js deleted file mode 100644 index 372e37c3b..000000000 --- a/api/api/nodes.info.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildNodesInfo (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'flat_settings', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - flatSettings: 'flat_settings', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a nodes.info request - * Returns information about nodes in the cluster. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html - */ - return function nodesInfo (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, nodeId, node_id, metric, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((node_id || nodeId) != null && (metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + encodeURIComponent(metric) - } else if ((node_id || nodeId) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) - } else if ((metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + encodeURIComponent(metric) - } else { - if (method == null) method = 'GET' - path = '/' + '_nodes' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildNodesInfo diff --git a/api/api/nodes.reload_secure_settings.js b/api/api/nodes.reload_secure_settings.js deleted file mode 100644 index dc82ea055..000000000 --- a/api/api/nodes.reload_secure_settings.js +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildNodesReloadSecureSettings (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a nodes.reload_secure_settings request - * Reloads secure settings. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings - */ - return function nodesReloadSecureSettings (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, nodeId, node_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((node_id || nodeId) != null) { - if (method == null) method = 'POST' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'reload_secure_settings' - } else { - if (method == null) method = 'POST' - path = '/' + '_nodes' + '/' + 'reload_secure_settings' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildNodesReloadSecureSettings diff --git a/api/api/nodes.stats.js b/api/api/nodes.stats.js deleted file mode 100644 index 5a47a7e21..000000000 --- a/api/api/nodes.stats.js +++ /dev/null @@ -1,105 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildNodesStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'completion_fields', - 'fielddata_fields', - 'fields', - 'groups', - 'level', - 'types', - 'timeout', - 'include_segment_file_sizes', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - completionFields: 'completion_fields', - fielddataFields: 'fielddata_fields', - includeSegmentFileSizes: 'include_segment_file_sizes', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a nodes.stats request - * Returns statistical information about nodes in the cluster. 
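[Review aid: the path logic above builds different URLs depending on which of node_id, metric, and index_metric are present; a sketch for the node-plus-metric case, with illustrative values.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

// Resolves to GET /_nodes/_local/stats/jvm,os per the branches above.
client.nodes.stats({ node_id: '_local', metric: 'jvm,os' }, (err, result) => {
  if (err) console.error(err)
  else console.log(Object.keys(result.body.nodes))
})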
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html - */ - return function nodesStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, nodeId, node_id, metric, indexMetric, index_metric, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((node_id || nodeId) != null && (metric) != null && (index_metric || indexMetric) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'stats' + '/' + encodeURIComponent(metric) + '/' + encodeURIComponent(index_metric || indexMetric) - } else if ((node_id || nodeId) != null && (metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'stats' + '/' + encodeURIComponent(metric) - } else if ((metric) != null && (index_metric || indexMetric) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + 'stats' + '/' + encodeURIComponent(metric) + '/' + encodeURIComponent(index_metric || indexMetric) - } else if ((node_id || nodeId) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'stats' - } else if ((metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + 'stats' + '/' + encodeURIComponent(metric) - } else { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + 'stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildNodesStats diff --git a/api/api/nodes.usage.js b/api/api/nodes.usage.js deleted file mode 100644 index 0b317b371..000000000 --- a/api/api/nodes.usage.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildNodesUsage (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a nodes.usage request - * Returns low-level information about REST actions usage on nodes. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html - */ - return function nodesUsage (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, nodeId, node_id, metric, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((node_id || nodeId) != null && (metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'usage' + '/' + encodeURIComponent(metric) - } else if ((node_id || nodeId) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + encodeURIComponent(node_id || nodeId) + '/' + 'usage' - } else if ((metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + 'usage' + '/' + encodeURIComponent(metric) - } else { - if (method == null) method = 'GET' - path = '/' + '_nodes' + '/' + 'usage' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildNodesUsage diff --git a/api/api/ping.js b/api/api/ping.js deleted file mode 100644 index f37fa4103..000000000 --- a/api/api/ping.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildPing (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a ping request - * Returns whether the cluster is running. 
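[Review aid: ping issues a HEAD request against the root path, so the useful signal is the error or status code rather than a payload. A sketch under assumed setup.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.ping({}, (err, result) => {
  if (err) console.error('cluster is down or unreachable', err)
  else console.log('cluster is up') // result.body is true for a 200 HEAD response
})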
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html - */ - return function ping (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'HEAD' - path = '/' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildPing diff --git a/api/api/put_script.js b/api/api/put_script.js deleted file mode 100644 index 160114e11..000000000 --- a/api/api/put_script.js +++ /dev/null @@ -1,102 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildPutScript (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout', - 'master_timeout', - 'context', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a put_script request - * Creates or updates a script. 
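[Review aid: put_script requires both id and body, and the extra url-component check above ties `context` to `id`; a sketch with an assumed script id and a small Painless script.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.putScript({
  id: 'calculate-score', // assumed script id
  body: {
    script: {
      lang: 'painless',
      source: 'Math.log(_score * 2) + params.my_modifier'
    }
  }
}, (err, result) => {
  if (err) console.error(err)
})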
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html - */ - return function putScript (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // check required url components - if (params['context'] != null && (params['id'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, context, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null && (context) != null) { - if (method == null) method = 'PUT' - path = '/' + '_scripts' + '/' + encodeURIComponent(id) + '/' + encodeURIComponent(context) - } else { - if (method == null) method = 'PUT' - path = '/' + '_scripts' + '/' + encodeURIComponent(id) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildPutScript diff --git a/api/api/rank_eval.js b/api/api/rank_eval.js deleted file mode 100644 index 2debba204..000000000 --- a/api/api/rank_eval.js +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRankEval (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'search_type', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - searchType: 'search_type', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a rank_eval request - * Allows to evaluate the quality of ranked search results over a set of typical search queries - * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html - */ - return function rankEval (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_rank_eval' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_rank_eval' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRankEval diff --git a/api/api/reindex.js b/api/api/reindex.js deleted file mode 100644 index 5712b2c56..000000000 --- a/api/api/reindex.js +++ /dev/null @@ -1,97 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildReindex (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh', - 'timeout', - 'wait_for_active_shards', - 'wait_for_completion', - 'requests_per_second', - 'scroll', - 'slices', - 'max_docs', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitForActiveShards: 'wait_for_active_shards', - waitForCompletion: 'wait_for_completion', - requestsPerSecond: 'requests_per_second', - maxDocs: 'max_docs', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a reindex request - * Allows to copy documents from one index to another, optionally filtering the source -documents by a query, changing the destination index settings, or fetching the -documents from a remote cluster. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html - */ - return function reindex (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_reindex' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildReindex diff --git a/api/api/reindex_rethrottle.js b/api/api/reindex_rethrottle.js deleted file mode 100644 index 37fd8a031..000000000 --- a/api/api/reindex_rethrottle.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildReindexRethrottle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'requests_per_second', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - requestsPerSecond: 'requests_per_second', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a reindex_rethrottle request - * Changes the number of requests per second for a particular Reindex operation. 
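[Review aid: both task_id and requests_per_second are validated as required by this builder; a sketch with an assumed task id. A requests_per_second of -1 removes the throttle.]

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // assumed local node

client.reindexRethrottle({
  task_id: 'oTUltX4IQMOUUVeiohTt8A:12345', // illustrative task id, not from the diff
  requests_per_second: -1                  // -1 disables throttling
}, (err, result) => {
  if (err) console.error(err)
})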
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html - */ - return function reindexRethrottle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['task_id'] == null && params['taskId'] == null) { - const err = new ConfigurationError('Missing required parameter: task_id or taskId') - return handleError(err, callback) - } - if (params['requests_per_second'] == null && params['requestsPerSecond'] == null) { - const err = new ConfigurationError('Missing required parameter: requests_per_second or requestsPerSecond') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, taskId, task_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_reindex' + '/' + encodeURIComponent(task_id || taskId) + '/' + '_rethrottle' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildReindexRethrottle diff --git a/api/api/render_search_template.js b/api/api/render_search_template.js deleted file mode 100644 index 3cdefa7cc..000000000 --- a/api/api/render_search_template.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRenderSearchTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a render_search_template request - * Allows to use the Mustache language to pre-render a search definition. 
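// A hedged sketch of rendering an inline Mustache template (the `source` and
// `params` shapes follow the search-template docs; `client` as constructed in
// the earlier sketch). With an `id`, the request targets
// /_render/template/{id}; without one, /_render/template.
client.renderSearchTemplate({
  body: {
    source: { query: { match: { message: '{{query_string}}' } } },
    params: { query_string: 'hello world' }
  }
}, (err, result) => {
  if (err) console.error(err)
  else console.log(result.body)
})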
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html#_validating_templates - */ - return function renderSearchTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_render' + '/' + 'template' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_render' + '/' + 'template' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRenderSearchTemplate diff --git a/api/api/rollup.delete_job.js b/api/api/rollup.delete_job.js deleted file mode 100644 index befeedff0..000000000 --- a/api/api/rollup.delete_job.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRollupDeleteJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a rollup.delete_job request - * Deletes an existing rollup job. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-delete-job.html - */ - return function rollupDeleteJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRollupDeleteJob diff --git a/api/api/rollup.get_jobs.js b/api/api/rollup.get_jobs.js deleted file mode 100644 index 57414ab06..000000000 --- a/api/api/rollup.get_jobs.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRollupGetJobs (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a rollup.get_jobs request - * Retrieves the configuration, stats, and status of rollup jobs. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-job.html - */ - return function rollupGetJobs (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = 'GET' - path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'GET' - path = '/' + '_rollup' + '/' + 'job' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRollupGetJobs diff --git a/api/api/rollup.get_rollup_caps.js b/api/api/rollup.get_rollup_caps.js deleted file mode 100644 index fd5a5fded..000000000 --- a/api/api/rollup.get_rollup_caps.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRollupGetRollupCaps (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a rollup.get_rollup_caps request - * Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-caps.html - */ - return function rollupGetRollupCaps (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = 'GET' - path = '/' + '_rollup' + '/' + 'data' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = 'GET' - path = '/' + '_rollup' + '/' + 'data' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRollupGetRollupCaps diff --git a/api/api/rollup.get_rollup_index_caps.js b/api/api/rollup.get_rollup_index_caps.js deleted file mode 100644 index 2f127cfc3..000000000 --- a/api/api/rollup.get_rollup_index_caps.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRollupGetRollupIndexCaps (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a rollup.get_rollup_index_caps request - * Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored). 
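// A hedged sketch (illustrative index name; `client` from the earlier sketch).
// Note the index argument is the rollup *store* index mentioned above, not the
// original source index.
client.rollup.getRollupIndexCaps({ index: 'sensor_rollup' }, (err, result) => {
  if (err) console.error(err)
  else console.log(result.body)
})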
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html - */ - return function rollupGetRollupIndexCaps (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_rollup' + '/' + 'data' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRollupGetRollupIndexCaps diff --git a/api/api/rollup.put_job.js b/api/api/rollup.put_job.js deleted file mode 100644 index ef68909fb..000000000 --- a/api/api/rollup.put_job.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRollupPutJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a rollup.put_job request - * Creates a rollup job. 
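// A hedged sketch of creating a rollup job. The body shape follows the rollup
// API reference; all names and the cron schedule are illustrative, and
// `client` is the instance from the earlier sketch.
client.rollup.putJob({
  id: 'sensor',
  body: {
    index_pattern: 'sensor-*',
    rollup_index: 'sensor_rollup',
    cron: '*/30 * * * * ?',
    page_size: 1000,
    groups: {
      date_histogram: { field: 'timestamp', fixed_interval: '1h' },
      terms: { fields: ['node'] }
    },
    metrics: [{ field: 'temperature', metrics: ['min', 'max', 'avg'] }]
  }
}, (err) => {
  if (err) console.error(err)
})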
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-put-job.html - */ - return function rollupPutJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRollupPutJob diff --git a/api/api/rollup.rollup_search.js b/api/api/rollup.rollup_search.js deleted file mode 100644 index 4bd756434..000000000 --- a/api/api/rollup.rollup_search.js +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRollupRollupSearch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'typed_keys', - 'rest_total_hits_as_int' - ] - - const snakeCase = { - typedKeys: 'typed_keys', - restTotalHitsAsInt: 'rest_total_hits_as_int' - } - - /** - * Perform a rollup.rollup_search request - * Enables searching rolled-up data using the standard query DSL. 
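// A hedged sketch: both `index` and `body` are required (enforced by the
// checks further below), and aggregations are limited to fields captured by
// the job's `groups` and `metrics`.
client.rollup.rollupSearch({
  index: 'sensor_rollup',
  body: {
    size: 0,
    aggregations: { max_temperature: { max: { field: 'temperature' } } }
  }
}, (err, result) => {
  if (err) console.error(err)
  else console.log(result.body.aggregations)
})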
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-search.html - */ - return function rollupRollupSearch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // check required url components - if (params['type'] != null && (params['index'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (type) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + '_rollup_search' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_rollup_search' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRollupRollupSearch diff --git a/api/api/rollup.start_job.js b/api/api/rollup.start_job.js deleted file mode 100644 index 5fad665ba..000000000 --- a/api/api/rollup.start_job.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRollupStartJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a rollup.start_job request - * Starts an existing, stopped rollup job. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-start-job.html - */ - return function rollupStartJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id) + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRollupStartJob diff --git a/api/api/rollup.stop_job.js b/api/api/rollup.stop_job.js deleted file mode 100644 index 727058e2b..000000000 --- a/api/api/rollup.stop_job.js +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildRollupStopJob (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_completion', - 'timeout' - ] - - const snakeCase = { - waitForCompletion: 'wait_for_completion' - - } - - /** - * Perform a rollup.stop_job request - * Stops an existing, started rollup job. 
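// A hedged sketch; the camelCase option maps to wait_for_completion via the
// snakeCase table above, and `timeout` passes through unchanged.
client.rollup.stopJob({
  id: 'sensor',
  waitForCompletion: true,
  timeout: '30s'
}, (err) => {
  if (err) console.error(err)
})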
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-stop-job.html - */ - return function rollupStopJob (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_rollup' + '/' + 'job' + '/' + encodeURIComponent(id) + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildRollupStopJob diff --git a/api/api/scripts_painless_execute.js b/api/api/scripts_painless_execute.js deleted file mode 100644 index 1bb435492..000000000 --- a/api/api/scripts_painless_execute.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildScriptsPainlessExecute (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a scripts_painless_execute request - * Allows an arbitrary script to be executed and a result to be returned - * https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html - */ - return function scriptsPainlessExecute (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + '_scripts' + '/' + 'painless' + '/' + '_execute' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildScriptsPainlessExecute diff --git a/api/api/scroll.js b/api/api/scroll.js deleted file mode 100644 index 47c10e30e..000000000 --- a/api/api/scroll.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildScroll (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'scroll', - 'scroll_id', - 'rest_total_hits_as_int', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - scrollId: 'scroll_id', - restTotalHitsAsInt: 'rest_total_hits_as_int', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a scroll request - * Allows to retrieve a large numbers of results from a single search request. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll - */ - return function scroll (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, scrollId, scroll_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((scroll_id || scrollId) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_search' + '/' + 'scroll' + '/' + encodeURIComponent(scroll_id || scrollId) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_search' + '/' + 'scroll' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildScroll diff --git a/api/api/search.js b/api/api/search.js deleted file mode 100644 index 0ddad2f51..000000000 --- a/api/api/search.js +++ /dev/null @@ -1,155 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSearch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'analyzer', - 'analyze_wildcard', - 'ccs_minimize_roundtrips', - 'default_operator', - 'df', - 'explain', - 'stored_fields', - 'docvalue_fields', - 'from', - 'ignore_unavailable', - 'ignore_throttled', - 'allow_no_indices', - 'expand_wildcards', - 'lenient', - 'preference', - 'q', - 'routing', - 'scroll', - 'search_type', - 'size', - 'sort', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'terminate_after', - 'stats', - 'suggest_field', - 'suggest_mode', - 'suggest_size', - 'suggest_text', - 'timeout', - 'track_scores', - 'track_total_hits', - 'allow_partial_search_results', - 'typed_keys', - 'version', - 'seq_no_primary_term', - 'request_cache', - 'batched_reduce_size', - 'max_concurrent_shard_requests', - 'pre_filter_shard_size', - 'rest_total_hits_as_int', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - analyzeWildcard: 'analyze_wildcard', - ccsMinimizeRoundtrips: 'ccs_minimize_roundtrips', - defaultOperator: 'default_operator', - storedFields: 'stored_fields', - docvalueFields: 'docvalue_fields', - ignoreUnavailable: 'ignore_unavailable', - ignoreThrottled: 'ignore_throttled', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - searchType: 'search_type', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - terminateAfter: 'terminate_after', - suggestField: 'suggest_field', - suggestMode: 'suggest_mode', - suggestSize: 'suggest_size', - suggestText: 'suggest_text', - trackScores: 'track_scores', - trackTotalHits: 'track_total_hits', - allowPartialSearchResults: 'allow_partial_search_results', - typedKeys: 'typed_keys', - seqNoPrimaryTerm: 'seq_no_primary_term', - requestCache: 'request_cache', - batchedReduceSize: 'batched_reduce_size', - maxConcurrentShardRequests: 'max_concurrent_shard_requests', - preFilterShardSize: 'pre_filter_shard_size', - restTotalHitsAsInt: 'rest_total_hits_as_int', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a search request - * Returns results matching a query. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html - */ - return function search (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 
'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_search' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_search' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSearch diff --git a/api/api/search_shards.js b/api/api/search_shards.js deleted file mode 100644 index 227ec04ba..000000000 --- a/api/api/search_shards.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSearchShards (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'preference', - 'routing', - 'local', - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a search_shards request - * Returns information about the indices and shards that a search request would be executed against. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html - */ - return function searchShards (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_search_shards' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_search_shards' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSearchShards diff --git a/api/api/search_template.js b/api/api/search_template.js deleted file mode 100644 index 5e48fb6f6..000000000 --- a/api/api/search_template.js +++ /dev/null @@ -1,109 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSearchTemplate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'ignore_throttled', - 'allow_no_indices', - 'expand_wildcards', - 'preference', - 'routing', - 'scroll', - 'search_type', - 'explain', - 'profile', - 'typed_keys', - 'rest_total_hits_as_int', - 'ccs_minimize_roundtrips', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - ignoreThrottled: 'ignore_throttled', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - searchType: 'search_type', - typedKeys: 'typed_keys', - restTotalHitsAsInt: 'rest_total_hits_as_int', - ccsMinimizeRoundtrips: 'ccs_minimize_roundtrips', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a search_template request - * Allows to use the Mustache language to pre-render a search definition. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html - */ - return function searchTemplate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_search' + '/' + 'template' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_search' + '/' + 'template' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSearchTemplate diff --git a/api/api/searchable_snapshots.clear_cache.js b/api/api/searchable_snapshots.clear_cache.js deleted file mode 100644 index 09cb13bd5..000000000 --- a/api/api/searchable_snapshots.clear_cache.js +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSearchableSnapshotsClearCache (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'ignore_unavailable', - 'allow_no_indices', - 'expand_wildcards', - 'index' - ] - - const snakeCase = { - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards' - - } - - /** - * Perform a searchable_snapshots.clear_cache request - * Clear the cache of searchable snapshots. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-clear-cache.html - */ - return function searchableSnapshotsClearCache (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_searchable_snapshots' + '/' + 'cache' + '/' + 'clear' - } else { - if (method == null) method = 'POST' - path = '/' + '_searchable_snapshots' + '/' + 'cache' + '/' + 'clear' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSearchableSnapshotsClearCache diff --git a/api/api/searchable_snapshots.mount.js b/api/api/searchable_snapshots.mount.js deleted file mode 100644 index 2a60bae63..000000000 --- a/api/api/searchable_snapshots.mount.js +++ /dev/null @@ -1,94 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSearchableSnapshotsMount (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'wait_for_completion' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - waitForCompletion: 'wait_for_completion' - } - - /** - * Perform a searchable_snapshots.mount request - * Mount a snapshot as a searchable index. 
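// A hedged sketch (illustrative repository/snapshot/index names). All three of
// `repository`, `snapshot`, and `body` are required by the checks further
// below; `body.index` names the index inside the snapshot to mount.
client.searchableSnapshots.mount({
  repository: 'my_backups',
  snapshot: 'snap_1',
  waitForCompletion: true,   // sent as wait_for_completion
  body: { index: 'my-index' }
}, (err, result) => {
  if (err) console.error(err)
  else console.log(result.body)
})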
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-mount-snapshot.html - */ - return function searchableSnapshotsMount (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - if (params['snapshot'] == null) { - const err = new ConfigurationError('Missing required parameter: snapshot') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // check required url components - if (params['snapshot'] != null && (params['repository'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) + '/' + '_mount' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSearchableSnapshotsMount diff --git a/api/api/searchable_snapshots.repository_stats.js b/api/api/searchable_snapshots.repository_stats.js deleted file mode 100644 index d91653f43..000000000 --- a/api/api/searchable_snapshots.repository_stats.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSearchableSnapshotsRepositoryStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a searchable_snapshots.repository_stats request - * Retrieve usage statistics about a snapshot repository. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-repository-stats.html - */ - return function searchableSnapshotsRepositoryStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + '_stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSearchableSnapshotsRepositoryStats diff --git a/api/api/searchable_snapshots.stats.js b/api/api/searchable_snapshots.stats.js deleted file mode 100644 index e295866e2..000000000 --- a/api/api/searchable_snapshots.stats.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSearchableSnapshotsStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a searchable_snapshots.stats request - * Retrieve various statistics about searchable snapshots. 
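// A hedged sketch: `index` is optional here. With it, the builder below
// targets /{index}/_searchable_snapshots/stats; without it, the cluster-wide
// /_searchable_snapshots/stats endpoint.
client.searchableSnapshots.stats({ index: 'my-index' }, (err, result) => {
  if (err) console.error(err)
  else console.log(result.body)
})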
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/searchable-snapshots-api-stats.html - */ - return function searchableSnapshotsStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null) { - if (method == null) method = 'GET' - path = '/' + encodeURIComponent(index) + '/' + '_searchable_snapshots' + '/' + 'stats' - } else { - if (method == null) method = 'GET' - path = '/' + '_searchable_snapshots' + '/' + 'stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSearchableSnapshotsStats diff --git a/api/api/security.authenticate.js b/api/api/security.authenticate.js deleted file mode 100644 index bd156f41f..000000000 --- a/api/api/security.authenticate.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityAuthenticate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.authenticate request - * Enables authentication as a user and retrieve information about the authenticated user. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html - */ - return function securityAuthenticate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + '_authenticate' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityAuthenticate diff --git a/api/api/security.change_password.js b/api/api/security.change_password.js deleted file mode 100644 index b71816c75..000000000 --- a/api/api/security.change_password.js +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityChangePassword (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.change_password request - * Changes the passwords of users in the native realm and built-in users. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html - */ - return function securityChangePassword (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, username, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((username) != null) { - if (method == null) method = 'PUT' - path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username) + '/' + '_password' - } else { - if (method == null) method = 'PUT' - path = '/' + '_security' + '/' + 'user' + '/' + '_password' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityChangePassword diff --git a/api/api/security.clear_cached_realms.js b/api/api/security.clear_cached_realms.js deleted file mode 100644 index d06e2aa24..000000000 --- a/api/api/security.clear_cached_realms.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityClearCachedRealms (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'usernames' - ] - - const snakeCase = { - - } - - /** - * Perform a security.clear_cached_realms request - * Evicts users from the user cache. 
Can completely clear the cache or evict specific users. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html - */ - return function securityClearCachedRealms (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['realms'] == null) { - const err = new ConfigurationError('Missing required parameter: realms') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, realms, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_security' + '/' + 'realm' + '/' + encodeURIComponent(realms) + '/' + '_clear_cache' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityClearCachedRealms diff --git a/api/api/security.clear_cached_roles.js b/api/api/security.clear_cached_roles.js deleted file mode 100644 index fb6497298..000000000 --- a/api/api/security.clear_cached_roles.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityClearCachedRoles (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.clear_cached_roles request - * Evicts roles from the native role cache. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html - */ - return function securityClearCachedRoles (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_security' + '/' + 'role' + '/' + encodeURIComponent(name) + '/' + '_clear_cache' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityClearCachedRoles diff --git a/api/api/security.create_api_key.js b/api/api/security.create_api_key.js deleted file mode 100644 index d0ec816da..000000000 --- a/api/api/security.create_api_key.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityCreateApiKey (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.create_api_key request - * Creates an API key for access without requiring basic authentication. 
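// A hedged sketch (role descriptor shape follows the security API reference;
// names are illustrative). The response body carries `id` and `api_key`,
// which callers combine as base64(id:api_key) for the `ApiKey` auth scheme.
client.security.createApiKey({
  refresh: 'wait_for',
  body: {
    name: 'ingest-key',
    expiration: '7d',
    role_descriptors: {
      ingest_only: {
        indices: [{ names: ['logs-*'], privileges: ['create_doc'] }]
      }
    }
  }
}, (err, result) => {
  if (err) console.error(err)
  else console.log(result.body.id, result.body.api_key)
})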
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html - */ - return function securityCreateApiKey (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_security' + '/' + 'api_key' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityCreateApiKey diff --git a/api/api/security.delete_privileges.js b/api/api/security.delete_privileges.js deleted file mode 100644 index 89031c2b1..000000000 --- a/api/api/security.delete_privileges.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityDeletePrivileges (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.delete_privileges request - * Removes application privileges. 
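This builder, security.delete_privileges, targets DELETE /_security/privilege/{application}/{name}; note the extra URL-component check below, which rejects a name supplied without an application. A sketch (the application and privilege names are placeholders):

    // assumes the `client` instance from the first sketch in this section
    client.security.deletePrivileges({
      application: 'myapp',
      name: 'read',
      refresh: 'wait_for' // make the deletion visible immediately
    }, (err, result) => {
      if (err) console.error(err)
      else console.log(result.body)
    })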
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-privilege.html - */ - return function securityDeletePrivileges (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['application'] == null) { - const err = new ConfigurationError('Missing required parameter: application') - return handleError(err, callback) - } - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // check required url components - if (params['name'] != null && (params['application'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: application') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, application, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_security' + '/' + 'privilege' + '/' + encodeURIComponent(application) + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityDeletePrivileges diff --git a/api/api/security.delete_role.js b/api/api/security.delete_role.js deleted file mode 100644 index 4dd113a6a..000000000 --- a/api/api/security.delete_role.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityDeleteRole (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.delete_role request - * Removes roles in the native realm. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role.html - */ - return function securityDeleteRole (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_security' + '/' + 'role' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityDeleteRole diff --git a/api/api/security.delete_role_mapping.js b/api/api/security.delete_role_mapping.js deleted file mode 100644 index 5edde79d3..000000000 --- a/api/api/security.delete_role_mapping.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityDeleteRoleMapping (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.delete_role_mapping request - * Removes role mappings. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-role-mapping.html - */ - return function securityDeleteRoleMapping (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_security' + '/' + 'role_mapping' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityDeleteRoleMapping diff --git a/api/api/security.delete_user.js b/api/api/security.delete_user.js deleted file mode 100644 index 007da3e8c..000000000 --- a/api/api/security.delete_user.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityDeleteUser (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.delete_user request - * Deletes users from the native realm. 
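security.delete_user sends DELETE /_security/user/{username}, with username required and refresh as the lone querystring option. A sketch (the username is a placeholder):

    // assumes the `client` instance from the first sketch in this section
    client.security.deleteUser({ username: 'jacknich', refresh: 'wait_for' }, (err, result) => {
      if (err) console.error(err)
      else console.log(result.body.found) // true when the user actually existed
    })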
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html - */ - return function securityDeleteUser (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['username'] == null) { - const err = new ConfigurationError('Missing required parameter: username') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, username, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityDeleteUser diff --git a/api/api/security.disable_user.js b/api/api/security.disable_user.js deleted file mode 100644 index b89afc916..000000000 --- a/api/api/security.disable_user.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityDisableUser (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.disable_user request - * Disables users in the native realm. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html - */ - return function securityDisableUser (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['username'] == null) { - const err = new ConfigurationError('Missing required parameter: username') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, username, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username) + '/' + '_disable' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityDisableUser diff --git a/api/api/security.enable_user.js b/api/api/security.enable_user.js deleted file mode 100644 index 367590123..000000000 --- a/api/api/security.enable_user.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityEnableUser (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.enable_user request - * Enables users in the native realm. 
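security.enable_user is the mirror image of the disable builder above: both PUT to /_security/user/{username}, one with an _enable suffix and one with _disable. A sketch:

    // assumes the `client` instance from the first sketch in this section
    client.security.enableUser({ username: 'jacknich' }, (err) => {
      if (err) console.error(err)
      // a 200 response means the account can authenticate again
    })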
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html - */ - return function securityEnableUser (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['username'] == null) { - const err = new ConfigurationError('Missing required parameter: username') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, username, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username) + '/' + '_enable' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityEnableUser diff --git a/api/api/security.get_api_key.js b/api/api/security.get_api_key.js deleted file mode 100644 index 7492f84e5..000000000 --- a/api/api/security.get_api_key.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityGetApiKey (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'id', - 'name', - 'username', - 'realm_name', - 'owner' - ] - - const snakeCase = { - realmName: 'realm_name' - - } - - /** - * Perform a security.get_api_key request - * Retrieves information for one or more API keys. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-api-key.html - */ - return function securityGetApiKey (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'api_key' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityGetApiKey diff --git a/api/api/security.get_builtin_privileges.js b/api/api/security.get_builtin_privileges.js deleted file mode 100644 index 15581bf4f..000000000 --- a/api/api/security.get_builtin_privileges.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityGetBuiltinPrivileges (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.get_builtin_privileges request - * Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-builtin-privileges.html - */ - return function securityGetBuiltinPrivileges (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'privilege' + '/' + '_builtin' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityGetBuiltinPrivileges diff --git a/api/api/security.get_privileges.js b/api/api/security.get_privileges.js deleted file mode 100644 index 15a5a3a72..000000000 --- a/api/api/security.get_privileges.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityGetPrivileges (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.get_privileges request - * Retrieves application privileges. 
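security.get_privileges is the first builder in this section with an optional path: as the three-way branch below shows, it resolves to /_security/privilege, /_security/privilege/{application}, or /_security/privilege/{application}/{name}, and the URL-component rule again makes name legal only together with an application. A sketch of the three forms:

    // assumes the `client` instance from the first sketch in this section
    const onResponse = (err, result) => err ? console.error(err) : console.log(result.body)

    client.security.getPrivileges({}, onResponse)                                     // everything
    client.security.getPrivileges({ application: 'myapp' }, onResponse)               // one application
    client.security.getPrivileges({ application: 'myapp', name: 'read' }, onResponse) // one privilege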
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html - */ - return function securityGetPrivileges (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required url components - if (params['name'] != null && (params['application'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: application') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, application, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((application) != null && (name) != null) { - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'privilege' + '/' + encodeURIComponent(application) + '/' + encodeURIComponent(name) - } else if ((application) != null) { - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'privilege' + '/' + encodeURIComponent(application) - } else { - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'privilege' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityGetPrivileges diff --git a/api/api/security.get_role.js b/api/api/security.get_role.js deleted file mode 100644 index e660c5b8a..000000000 --- a/api/api/security.get_role.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityGetRole (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.get_role request - * Retrieves roles in the native realm. 
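security.get_role follows the same optional-path pattern with a single parameter: GET /_security/role for all roles, or /_security/role/{name} for one. A sketch (the role name is a placeholder):

    // assumes the `client` instance from the first sketch in this section
    client.security.getRole({ name: 'logs_reader' }, (err, result) => {
      if (err) console.error(err)
      else console.log(result.body.logs_reader) // the response is keyed by role name
    })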
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role.html - */ - return function securityGetRole (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'role' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'role' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityGetRole diff --git a/api/api/security.get_role_mapping.js b/api/api/security.get_role_mapping.js deleted file mode 100644 index 0a1768512..000000000 --- a/api/api/security.get_role_mapping.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityGetRoleMapping (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.get_role_mapping request - * Retrieves role mappings. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-role-mapping.html - */ - return function securityGetRoleMapping (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((name) != null) { - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'role_mapping' + '/' + encodeURIComponent(name) - } else { - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'role_mapping' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityGetRoleMapping diff --git a/api/api/security.get_token.js b/api/api/security.get_token.js deleted file mode 100644 index 066c9e6d0..000000000 --- a/api/api/security.get_token.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityGetToken (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.get_token request - * Creates a bearer token for access without requiring basic authentication. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html - */ - return function securityGetToken (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_security' + '/' + 'oauth2' + '/' + 'token' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityGetToken diff --git a/api/api/security.get_user.js b/api/api/security.get_user.js deleted file mode 100644 index 40383949b..000000000 --- a/api/api/security.get_user.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityGetUser (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.get_user request - * Retrieves information about users in the native realm and built-in users. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html - */ - return function securityGetUser (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, username, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((username) != null) { - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username) - } else { - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'user' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityGetUser diff --git a/api/api/security.get_user_privileges.js b/api/api/security.get_user_privileges.js deleted file mode 100644 index 3ef3a9dc3..000000000 --- a/api/api/security.get_user_privileges.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityGetUserPrivileges (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.get_user_privileges request - * Retrieves application privileges. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html - */ - return function securityGetUserPrivileges (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_security' + '/' + 'user' + '/' + '_privileges' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityGetUserPrivileges diff --git a/api/api/security.has_privileges.js b/api/api/security.has_privileges.js deleted file mode 100644 index dc0f1d1d1..000000000 --- a/api/api/security.has_privileges.js +++ /dev/null @@ -1,83 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityHasPrivileges (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.has_privileges request - * Determines whether the specified user has a specified list of privileges. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-has-privileges.html - */ - return function securityHasPrivileges (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, user, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((user) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(user) + '/' + '_has_privileges' - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_security' + '/' + 'user' + '/' + '_has_privileges' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityHasPrivileges diff --git a/api/api/security.invalidate_api_key.js b/api/api/security.invalidate_api_key.js deleted file mode 100644 index 470704850..000000000 --- a/api/api/security.invalidate_api_key.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityInvalidateApiKey (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.invalidate_api_key request - * Invalidates one or more API keys. 
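security.invalidate_api_key is one of the few builders that sends a body with a DELETE request (to /_security/api_key). The body selects what to invalidate; per the Elasticsearch docs it can target an id, a name, a realm_name, a username, or the owner flag (the key name used here is illustrative):

    // assumes the `client` instance from the first sketch in this section
    client.security.invalidateApiKey({
      body: { name: 'ingest-key' }
    }, (err, result) => {
      if (err) console.error(err)
      else console.log(result.body.invalidated_api_keys)
    })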
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html - */ - return function securityInvalidateApiKey (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_security' + '/' + 'api_key' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityInvalidateApiKey diff --git a/api/api/security.invalidate_token.js b/api/api/security.invalidate_token.js deleted file mode 100644 index dc1bcf6ba..000000000 --- a/api/api/security.invalidate_token.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityInvalidateToken (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a security.invalidate_token request - * Invalidates one or more access tokens or refresh tokens. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html - */ - return function securityInvalidateToken (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_security' + '/' + 'oauth2' + '/' + 'token' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityInvalidateToken diff --git a/api/api/security.put_privileges.js b/api/api/security.put_privileges.js deleted file mode 100644 index 83df2139c..000000000 --- a/api/api/security.put_privileges.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityPutPrivileges (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.put_privileges request - * Adds or updates application privileges. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-privileges.html - */ - return function securityPutPrivileges (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_security' + '/' + 'privilege' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityPutPrivileges diff --git a/api/api/security.put_role.js b/api/api/security.put_role.js deleted file mode 100644 index 3de853456..000000000 --- a/api/api/security.put_role.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityPutRole (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.put_role request - * Adds and updates roles in the native realm. 
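security.put_role PUTs to /_security/role/{name} and requires both the name and a body describing the role. A sketch granting read access over an index pattern (the role name, privileges, and pattern are illustrative):

    // assumes the `client` instance from the first sketch in this section
    client.security.putRole({
      name: 'logs_reader',
      refresh: 'wait_for',
      body: {
        cluster: ['monitor'],
        indices: [{ names: ['logs-*'], privileges: ['read'] }]
      }
    }, (err, result) => {
      if (err) console.error(err)
      else console.log(result.body.role) // { created: true } on first creation
    })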
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role.html - */ - return function securityPutRole (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_security' + '/' + 'role' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityPutRole diff --git a/api/api/security.put_role_mapping.js b/api/api/security.put_role_mapping.js deleted file mode 100644 index e35a70657..000000000 --- a/api/api/security.put_role_mapping.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityPutRoleMapping (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.put_role_mapping request - * Creates and updates role mappings. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-role-mapping.html - */ - return function securityPutRoleMapping (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['name'] == null) { - const err = new ConfigurationError('Missing required parameter: name') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, name, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_security' + '/' + 'role_mapping' + '/' + encodeURIComponent(name) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityPutRoleMapping diff --git a/api/api/security.put_user.js b/api/api/security.put_user.js deleted file mode 100644 index 398816fa9..000000000 --- a/api/api/security.put_user.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSecurityPutUser (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'refresh' - ] - - const snakeCase = { - - } - - /** - * Perform a security.put_user request - * Adds and updates users in the native realm. These users are commonly referred to as native users. 
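security.put_user rounds out the security namespace in this section: PUT /_security/user/{username} with a mandatory body. A sketch that creates a native user and hands it the role from the put_role sketch above (all values are placeholders; never hard-code real credentials):

    // assumes the `client` instance from the first sketch in this section
    client.security.putUser({
      username: 'jacknich',
      body: {
        password: 'a-long-random-password', // placeholder only
        roles: ['logs_reader'],
        full_name: 'Jack Nicholson'
      }
    }, (err, result) => {
      if (err) console.error(err)
      else console.log(result.body.created)
    })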
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html - */ - return function securityPutUser (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['username'] == null) { - const err = new ConfigurationError('Missing required parameter: username') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, username, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_security' + '/' + 'user' + '/' + encodeURIComponent(username) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSecurityPutUser diff --git a/api/api/slm.delete_lifecycle.js b/api/api/slm.delete_lifecycle.js deleted file mode 100644 index 6730ea66e..000000000 --- a/api/api/slm.delete_lifecycle.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSlmDeleteLifecycle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a slm.delete_lifecycle request - * Deletes an existing snapshot lifecycle policy. 
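From here the deletions move from the security namespace to snapshot lifecycle management. slm.delete_lifecycle maps to DELETE /_slm/policy/{policy_id}; as its required-parameter check shows, the builder accepts the identifier as either policy_id or the camelCased policyId. A sketch (the policy id is a placeholder):

    // assumes the `client` instance from the first sketch in this section
    client.slm.deleteLifecycle({ policy_id: 'daily-snapshots' }, (err, result) => {
      if (err) console.error(err)
      else console.log(result.body) // { acknowledged: true }
    })
    // { policyId: 'daily-snapshots' } is accepted equivalently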
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete-policy.html - */ - return function slmDeleteLifecycle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['policy_id'] == null && params['policyId'] == null) { - const err = new ConfigurationError('Missing required parameter: policy_id or policyId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, policyId, policy_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_slm' + '/' + 'policy' + '/' + encodeURIComponent(policy_id || policyId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSlmDeleteLifecycle diff --git a/api/api/slm.execute_lifecycle.js b/api/api/slm.execute_lifecycle.js deleted file mode 100644 index 669e9470e..000000000 --- a/api/api/slm.execute_lifecycle.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSlmExecuteLifecycle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a slm.execute_lifecycle request - * Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-lifecycle.html - */ - return function slmExecuteLifecycle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['policy_id'] == null && params['policyId'] == null) { - const err = new ConfigurationError('Missing required parameter: policy_id or policyId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, policyId, policy_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_slm' + '/' + 'policy' + '/' + encodeURIComponent(policy_id || policyId) + '/' + '_execute' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSlmExecuteLifecycle diff --git a/api/api/slm.execute_retention.js b/api/api/slm.execute_retention.js deleted file mode 100644 index c8d8db99e..000000000 --- a/api/api/slm.execute_retention.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSlmExecuteRetention (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a slm.execute_retention request - * Deletes any snapshots that are expired according to the policy's retention rules. 
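slm.execute_retention takes no parameters at all: it simply POSTs to /_slm/_execute_retention. When the callback is omitted, these generated functions return a promise instead, so this sketch uses that form:

    // assumes the `client` instance from the first sketch in this section
    client.slm.executeRetention()
      .then(result => console.log(result.body))
      .catch(err => console.error(err))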
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute-retention.html - */ - return function slmExecuteRetention (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_slm' + '/' + '_execute_retention' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSlmExecuteRetention diff --git a/api/api/slm.get_lifecycle.js b/api/api/slm.get_lifecycle.js deleted file mode 100644 index 22be108f6..000000000 --- a/api/api/slm.get_lifecycle.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSlmGetLifecycle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a slm.get_lifecycle request - * Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-policy.html - */ - return function slmGetLifecycle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, policyId, policy_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((policy_id || policyId) != null) { - if (method == null) method = 'GET' - path = '/' + '_slm' + '/' + 'policy' + '/' + encodeURIComponent(policy_id || policyId) - } else { - if (method == null) method = 'GET' - path = '/' + '_slm' + '/' + 'policy' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSlmGetLifecycle diff --git a/api/api/slm.get_stats.js b/api/api/slm.get_stats.js deleted file mode 100644 index 9937bf94f..000000000 --- a/api/api/slm.get_stats.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSlmGetStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a slm.get_stats request - * Returns global and policy-level statistics about actions taken by snapshot lifecycle management. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/slm-api-get-stats.html - */ - return function slmGetStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_slm' + '/' + 'stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSlmGetStats diff --git a/api/api/slm.get_status.js b/api/api/slm.get_status.js deleted file mode 100644 index 64a66cb88..000000000 --- a/api/api/slm.get_status.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSlmGetStatus (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a slm.get_status request - * Retrieves the status of snapshot lifecycle management (SLM). 
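Taken together, the SLM read helpers removed above (slm.get_lifecycle, slm.get_stats) were thin wrappers that defaulted to GET and sent no body, accepting either snake_case or camelCase parameter names. A minimal usage sketch against a legacy 7.x-style client follows; the node URL and policy name are illustrative assumptions, not values from this diff:

// Assumed setup: a 7.x-style client whose `slm` namespace was built
// from the generated files removed in this diff.
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

async function inspectSlm () {
  // GET /_slm/policy/<policy_id>; `policyId` was accepted as an alias
  const policy = await client.slm.getLifecycle({ policy_id: 'nightly-snapshots' })
  // GET /_slm/stats
  const stats = await client.slm.getStats()
  console.log(policy.body, stats.body)
}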
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get-status.html - */ - return function slmGetStatus (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_slm' + '/' + 'status' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSlmGetStatus diff --git a/api/api/slm.put_lifecycle.js b/api/api/slm.put_lifecycle.js deleted file mode 100644 index 47d4e6de3..000000000 --- a/api/api/slm.put_lifecycle.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSlmPutLifecycle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a slm.put_lifecycle request - * Creates or updates a snapshot lifecycle policy. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put-policy.html - */ - return function slmPutLifecycle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['policy_id'] == null && params['policyId'] == null) { - const err = new ConfigurationError('Missing required parameter: policy_id or policyId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, policyId, policy_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_slm' + '/' + 'policy' + '/' + encodeURIComponent(policy_id || policyId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSlmPutLifecycle diff --git a/api/api/slm.start.js b/api/api/slm.start.js deleted file mode 100644 index 137d0c1c9..000000000 --- a/api/api/slm.start.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSlmStart (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a slm.start request - * Turns on snapshot lifecycle management (SLM). - * https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-start.html - */ - return function slmStart (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_slm' + '/' + 'start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSlmStart diff --git a/api/api/slm.stop.js b/api/api/slm.stop.js deleted file mode 100644 index 32ccb37a0..000000000 --- a/api/api/slm.stop.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSlmStop (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a slm.stop request - * Turns off snapshot lifecycle management (SLM). 
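The SLM write helpers removed above enforced a required policy_id (or policyId) and defaulted to PUT, with slm.execute_lifecycle appending /_execute to the policy path. A hedged sketch reusing the `client` from the previous example, inside an async context; the policy body fields are illustrative assumptions drawn from the SLM docs, not from this diff:

// PUT /_slm/policy/nightly-snapshots -- policy fields are illustrative
await client.slm.putLifecycle({
  policy_id: 'nightly-snapshots',
  body: {
    schedule: '0 30 1 * * ?',
    name: '<nightly-snap-{now/d}>',
    repository: 'my_repository',
    config: { indices: ['*'] },
    retention: { expire_after: '30d' }
  }
})
// PUT /_slm/policy/nightly-snapshots/_execute
await client.slm.executeLifecycle({ policy_id: 'nightly-snapshots' })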
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-stop.html - */ - return function slmStop (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_slm' + '/' + 'stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSlmStop diff --git a/api/api/snapshot.cleanup_repository.js b/api/api/snapshot.cleanup_repository.js deleted file mode 100644 index 7fc13cdd8..000000000 --- a/api/api/snapshot.cleanup_repository.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotCleanupRepository (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.cleanup_repository request - * Removes stale data from repository. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html - */ - return function snapshotCleanupRepository (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + '_cleanup' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotCleanupRepository diff --git a/api/api/snapshot.create.js b/api/api/snapshot.create.js deleted file mode 100644 index 4c0d83631..000000000 --- a/api/api/snapshot.create.js +++ /dev/null @@ -1,97 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotCreate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'wait_for_completion', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - waitForCompletion: 'wait_for_completion', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.create request - * Creates a snapshot in a repository. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html - */ - return function snapshotCreate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - if (params['snapshot'] == null) { - const err = new ConfigurationError('Missing required parameter: snapshot') - return handleError(err, callback) - } - - // check required url components - if (params['snapshot'] != null && (params['repository'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotCreate diff --git a/api/api/snapshot.create_repository.js b/api/api/snapshot.create_repository.js deleted file mode 100644 index b34dbbe8d..000000000 --- a/api/api/snapshot.create_repository.js +++ /dev/null @@ -1,91 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotCreateRepository (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'timeout', - 'verify', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.create_repository request - * Creates a repository. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html - */ - return function snapshotCreateRepository (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotCreateRepository diff --git a/api/api/snapshot.delete.js b/api/api/snapshot.delete.js deleted file mode 100644 index 1b2bc1cc8..000000000 --- a/api/api/snapshot.delete.js +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotDelete (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.delete request - * Deletes one or more snapshots. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html - */ - return function snapshotDelete (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - if (params['snapshot'] == null) { - const err = new ConfigurationError('Missing required parameter: snapshot') - return handleError(err, callback) - } - - // check required url components - if (params['snapshot'] != null && (params['repository'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotDelete diff --git a/api/api/snapshot.delete_repository.js b/api/api/snapshot.delete_repository.js deleted file mode 100644 index 337992c00..000000000 --- a/api/api/snapshot.delete_repository.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotDeleteRepository (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.delete_repository request - * Deletes a repository. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html - */ - return function snapshotDeleteRepository (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotDeleteRepository diff --git a/api/api/snapshot.get.js b/api/api/snapshot.get.js deleted file mode 100644 index e78500b62..000000000 --- a/api/api/snapshot.get.js +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotGet (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'ignore_unavailable', - 'verbose', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - ignoreUnavailable: 'ignore_unavailable', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.get request - * Returns information about a snapshot. 
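Among the snapshot helpers removed here, snapshot.create_repository required both `repository` and `body` and issued a PUT, while snapshot.delete_repository required only `repository` and issued a DELETE. A short sketch, assuming the `client` from the first example and an async context; the repository name and filesystem location are assumptions:

// PUT /_snapshot/my_fs_repo -- `body` was a required parameter
await client.snapshot.createRepository({
  repository: 'my_fs_repo',
  body: { type: 'fs', settings: { location: '/mnt/backups' } }
})
// DELETE /_snapshot/my_fs_repo
await client.snapshot.deleteRepository({ repository: 'my_fs_repo' })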
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html - */ - return function snapshotGet (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - if (params['snapshot'] == null) { - const err = new ConfigurationError('Missing required parameter: snapshot') - return handleError(err, callback) - } - - // check required url components - if (params['snapshot'] != null && (params['repository'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotGet diff --git a/api/api/snapshot.get_repository.js b/api/api/snapshot.get_repository.js deleted file mode 100644 index c55356d66..000000000 --- a/api/api/snapshot.get_repository.js +++ /dev/null @@ -1,85 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotGetRepository (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'local', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.get_repository request - * Returns information about a repository. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html - */ - return function snapshotGetRepository (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((repository) != null) { - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) - } else { - if (method == null) method = 'GET' - path = '/' + '_snapshot' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotGetRepository diff --git a/api/api/snapshot.restore.js b/api/api/snapshot.restore.js deleted file mode 100644 index 567b02d48..000000000 --- a/api/api/snapshot.restore.js +++ /dev/null @@ -1,97 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotRestore (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'wait_for_completion', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - waitForCompletion: 'wait_for_completion', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.restore request - * Restores a snapshot. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html - */ - return function snapshotRestore (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - if (params['snapshot'] == null) { - const err = new ConfigurationError('Missing required parameter: snapshot') - return handleError(err, callback) - } - - // check required url components - if (params['snapshot'] != null && (params['repository'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) + '/' + '_restore' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotRestore diff --git a/api/api/snapshot.status.js b/api/api/snapshot.status.js deleted file mode 100644 index 9e057c997..000000000 --- a/api/api/snapshot.status.js +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotStatus (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'ignore_unavailable', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - ignoreUnavailable: 'ignore_unavailable', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.status request - * Returns information about the status of a snapshot. 
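snapshot.create and snapshot.restore both required `repository` and `snapshot`, defaulting to PUT and to POST with an /_restore suffix respectively; camelCase options such as waitForCompletion were rewritten to snake_case by snakeCaseKeys. A usage sketch with assumed names, again reusing `client`:

// PUT /_snapshot/my_fs_repo/snap-1?wait_for_completion=true
await client.snapshot.create({
  repository: 'my_fs_repo',
  snapshot: 'snap-1',
  waitForCompletion: true // rewritten to wait_for_completion
})
// POST /_snapshot/my_fs_repo/snap-1/_restore
await client.snapshot.restore({ repository: 'my_fs_repo', snapshot: 'snap-1' })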
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html - */ - return function snapshotStatus (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required url components - if (params['snapshot'] != null && (params['repository'] == null)) { - const err = new ConfigurationError('Missing required parameter of the url: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, snapshot, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((repository) != null && (snapshot) != null) { - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + encodeURIComponent(snapshot) + '/' + '_status' - } else if ((repository) != null) { - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + '_status' - } else { - if (method == null) method = 'GET' - path = '/' + '_snapshot' + '/' + '_status' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotStatus diff --git a/api/api/snapshot.verify_repository.js b/api/api/snapshot.verify_repository.js deleted file mode 100644 index 1e04a2f09..000000000 --- a/api/api/snapshot.verify_repository.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSnapshotVerifyRepository (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - masterTimeout: 'master_timeout', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a snapshot.verify_repository request - * Verifies a repository. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html - */ - return function snapshotVerifyRepository (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['repository'] == null) { - const err = new ConfigurationError('Missing required parameter: repository') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, repository, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_snapshot' + '/' + encodeURIComponent(repository) + '/' + '_verify' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSnapshotVerifyRepository diff --git a/api/api/sql.clear_cursor.js b/api/api/sql.clear_cursor.js deleted file mode 100644 index bfa3ce6b8..000000000 --- a/api/api/sql.clear_cursor.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSqlClearCursor (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a sql.clear_cursor request - * Clears the SQL cursor - * https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-pagination.html - */ - return function sqlClearCursor (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_sql' + '/' + 'close' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSqlClearCursor diff --git a/api/api/sql.query.js b/api/api/sql.query.js deleted file mode 100644 index 5354b8684..000000000 --- a/api/api/sql.query.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSqlQuery (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'format' - ] - - const snakeCase = { - - } - - /** - * Perform a sql.query request - * Executes a SQL request - * https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-rest-overview.html - */ - return function sqlQuery (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_sql' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSqlQuery diff --git a/api/api/sql.translate.js b/api/api/sql.translate.js deleted file mode 100644 index 652570095..000000000 --- a/api/api/sql.translate.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSqlTranslate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a sql.translate request - * Translates SQL into Elasticsearch queries - * https://www.elastic.co/guide/en/elasticsearch/reference/current/sql-translate.html - */ - return function sqlTranslate (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + '_sql' + '/' + 'translate' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSqlTranslate diff --git a/api/api/ssl.certificates.js b/api/api/ssl.certificates.js deleted file mode 100644 index beef26f5f..000000000 --- a/api/api/ssl.certificates.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildSslCertificates (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a ssl.certificates request - * Retrieves information about the X.509 certificates used to encrypt communications in the cluster. 
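All three SQL helpers removed above required a `body`, which forced the default method to POST; sql.query additionally accepted `format` as its only querystring option, and sql.translate returned the Query DSL equivalent of a SQL statement. A sketch with an assumed index name and statement:

// POST /_sql?format=json -- `body` was required
const result = await client.sql.query({
  format: 'json',
  body: { query: 'SELECT * FROM "my-index" LIMIT 10' }
})
// POST /_sql/translate -- translates SQL into Query DSL
const dsl = await client.sql.translate({
  body: { query: 'SELECT * FROM "my-index" LIMIT 10' }
})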
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html - */ - return function sslCertificates (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_ssl' + '/' + 'certificates' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildSslCertificates diff --git a/api/api/tasks.cancel.js b/api/api/tasks.cancel.js deleted file mode 100644 index 03700b092..000000000 --- a/api/api/tasks.cancel.js +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTasksCancel (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'nodes', - 'actions', - 'parent_task_id', - 'wait_for_completion', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - parentTaskId: 'parent_task_id', - waitForCompletion: 'wait_for_completion', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a tasks.cancel request - * Cancels a task, if it can be cancelled through an API. 
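ssl.certificates took no required parameters at all and issued a GET to /_ssl/certificates. For completeness, a one-line sketch in the same assumed async context:

// GET /_ssl/certificates -- lists the cluster's X.509 certificates
const { body: certs } = await client.ssl.certificates()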
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html - */ - return function tasksCancel (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, taskId, task_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((task_id || taskId) != null) { - if (method == null) method = 'POST' - path = '/' + '_tasks' + '/' + encodeURIComponent(task_id || taskId) + '/' + '_cancel' - } else { - if (method == null) method = 'POST' - path = '/' + '_tasks' + '/' + '_cancel' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTasksCancel diff --git a/api/api/tasks.get.js b/api/api/tasks.get.js deleted file mode 100644 index 37f09539c..000000000 --- a/api/api/tasks.get.js +++ /dev/null @@ -1,86 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTasksGet (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_completion', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitForCompletion: 'wait_for_completion', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a tasks.get request - * Returns information about a task. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html - */ - return function tasksGet (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['task_id'] == null && params['taskId'] == null) { - const err = new ConfigurationError('Missing required parameter: task_id or taskId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, taskId, task_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_tasks' + '/' + encodeURIComponent(task_id || taskId) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTasksGet diff --git a/api/api/tasks.list.js b/api/api/tasks.list.js deleted file mode 100644 index ec188c579..000000000 --- a/api/api/tasks.list.js +++ /dev/null @@ -1,87 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTasksList (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'nodes', - 'actions', - 'detailed', - 'parent_task_id', - 'wait_for_completion', - 'group_by', - 'timeout', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - parentTaskId: 'parent_task_id', - waitForCompletion: 'wait_for_completion', - groupBy: 'group_by', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a tasks.list request - * Returns a list of tasks. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html - */ - return function tasksList (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_tasks' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTasksList diff --git a/api/api/termvectors.js b/api/api/termvectors.js deleted file mode 100644 index eb44cda6a..000000000 --- a/api/api/termvectors.js +++ /dev/null @@ -1,102 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTermvectors (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'term_statistics', - 'field_statistics', - 'fields', - 'offsets', - 'positions', - 'payloads', - 'preference', - 'routing', - 'realtime', - 'version', - 'version_type', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - termStatistics: 'term_statistics', - fieldStatistics: 'field_statistics', - versionType: 'version_type', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a termvectors request - * Returns information and statistics about terms in the fields of a particular document. 
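Of the task helpers removed above, only tasks.get made task_id (or taskId) required; tasks.cancel fell back to POST /_tasks/_cancel when no id was given, and tasks.list always issued GET /_tasks. A sketch; the task id value is an illustrative assumption:

// GET /_tasks?group_by=parents -- groupBy was rewritten to group_by
const tasks = await client.tasks.list({ groupBy: 'parents' })
// GET /_tasks/<task_id> -- task_id was required here
const task = await client.tasks.get({ task_id: 'oTUltX4IQMOUUVeiohTt8A:12345' })
// POST /_tasks/<task_id>/_cancel (or /_tasks/_cancel to cancel by filter)
await client.tasks.cancel({ task_id: 'oTUltX4IQMOUUVeiohTt8A:12345' })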
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html - */ - return function termvectors (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (id) != null) { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_termvectors' + '/' + encodeURIComponent(id) - } else { - if (method == null) method = body == null ? 'GET' : 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_termvectors' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTermvectors diff --git a/api/api/transform.delete_transform.js b/api/api/transform.delete_transform.js deleted file mode 100644 index 6d76bfe56..000000000 --- a/api/api/transform.delete_transform.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTransformDeleteTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'force' - ] - - const snakeCase = { - - } - - /** - * Perform a transform.delete_transform request - * Deletes an existing transform. 
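termvectors required `index`, while `id` was optional and merely switched the path between /<index>/_termvectors/<id> and /<index>/_termvectors; with no body the method defaulted to GET. A sketch with assumed index and field names:

// GET /my-index/_termvectors/1
const tv = await client.termvectors({
  index: 'my-index',
  id: '1',
  fields: ['text'],       // accepted querystring option
  term_statistics: true   // snake_case keys were passed through as-is
})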
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html - */ - return function transformDeleteTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTransformDeleteTransform diff --git a/api/api/transform.get_transform.js b/api/api/transform.get_transform.js deleted file mode 100644 index 4fc5fa0e4..000000000 --- a/api/api/transform.get_transform.js +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTransformGetTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'from', - 'size', - 'allow_no_match' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - } - - /** - * Perform a transform.get_transform request - * Retrieves configuration information for transforms. 
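Because `transform_id` is optional here, the path logic below branches: with an id it issues GET /_transform/&lt;id&gt;, without one GET /_transform. A promise-style sketch, with illustrative names and paging values:

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // one named transform
  const one = await client.transform.getTransform({ transform_id: 'my-transform' })
  // all transforms, paged
  const all = await client.transform.getTransform({ from: 0, size: 100, allow_no_match: true })
  console.log(one.body.transforms, all.body.count)
}

run().catch(console.error)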
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html - */ - return function transformGetTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((transform_id || transformId) != null) { - if (method == null) method = 'GET' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) - } else { - if (method == null) method = 'GET' - path = '/' + '_transform' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTransformGetTransform diff --git a/api/api/transform.get_transform_stats.js b/api/api/transform.get_transform_stats.js deleted file mode 100644 index d139d9e00..000000000 --- a/api/api/transform.get_transform_stats.js +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTransformGetTransformStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'from', - 'size', - 'allow_no_match' - ] - - const snakeCase = { - allowNoMatch: 'allow_no_match' - } - - /** - * Perform a transform.get_transform_stats request - * Retrieves usage information for transforms. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html - */ - return function transformGetTransformStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_stats' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTransformGetTransformStats diff --git a/api/api/transform.preview_transform.js b/api/api/transform.preview_transform.js deleted file mode 100644 index 73aadb2c2..000000000 --- a/api/api/transform.preview_transform.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTransformPreviewTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a transform.preview_transform request - * Previews a transform. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html - */ - return function transformPreviewTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_transform' + '/' + '_preview' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTransformPreviewTransform diff --git a/api/api/transform.put_transform.js b/api/api/transform.put_transform.js deleted file mode 100644 index 578f1c0b3..000000000 --- a/api/api/transform.put_transform.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTransformPutTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'defer_validation' - ] - - const snakeCase = { - deferValidation: 'defer_validation' - } - - /** - * Perform a transform.put_transform request - * Instantiates a transform. 
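Both the transform id and a request body are required by the checks below, and `defer_validation` is the one accepted querystring key. A sketch with an illustrative pivot configuration (index and field names are assumptions):

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

client.transform.putTransform({
  transform_id: 'my-transform',
  defer_validation: true,        // skip source-index validation at creation time
  body: {
    source: { index: 'my-source' },
    dest: { index: 'my-dest' },
    pivot: {
      group_by: { user: { terms: { field: 'user.id' } } },
      aggregations: { total: { sum: { field: 'amount' } } }
    }
  }
}).then(r => console.log(r.body.acknowledged), console.error)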
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html - */ - return function transformPutTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTransformPutTransform diff --git a/api/api/transform.start_transform.js b/api/api/transform.start_transform.js deleted file mode 100644 index b06f6a793..000000000 --- a/api/api/transform.start_transform.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTransformStartTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'timeout' - ] - - const snakeCase = { - - } - - /** - * Perform a transform.start_transform request - * Starts one or more transforms. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html - */ - return function transformStartTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTransformStartTransform diff --git a/api/api/transform.stop_transform.js b/api/api/transform.stop_transform.js deleted file mode 100644 index be60db461..000000000 --- a/api/api/transform.stop_transform.js +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTransformStopTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'force', - 'wait_for_completion', - 'timeout', - 'allow_no_match', - 'wait_for_checkpoint' - ] - - const snakeCase = { - waitForCompletion: 'wait_for_completion', - allowNoMatch: 'allow_no_match', - waitForCheckpoint: 'wait_for_checkpoint' - } - - /** - * Perform a transform.stop_transform request - * Stops one or more transforms. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html - */ - return function transformStopTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTransformStopTransform diff --git a/api/api/transform.update_transform.js b/api/api/transform.update_transform.js deleted file mode 100644 index eaa9404ae..000000000 --- a/api/api/transform.update_transform.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildTransformUpdateTransform (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'defer_validation' - ] - - const snakeCase = { - deferValidation: 'defer_validation' - } - - /** - * Perform a transform.update_transform request - * Updates certain properties of a transform. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html - */ - return function transformUpdateTransform (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['transform_id'] == null && params['transformId'] == null) { - const err = new ConfigurationError('Missing required parameter: transform_id or transformId') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, transformId, transform_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_transform' + '/' + encodeURIComponent(transform_id || transformId) + '/' + '_update' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildTransformUpdateTransform diff --git a/api/api/update.js b/api/api/update.js deleted file mode 100644 index bd5744bba..000000000 --- a/api/api/update.js +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildUpdate (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'wait_for_active_shards', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'lang', - 'refresh', - 'retry_on_conflict', - 'routing', - 'timeout', - 'if_seq_no', - 'if_primary_term', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - waitForActiveShards: 'wait_for_active_shards', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - retryOnConflict: 'retry_on_conflict', - ifSeqNo: 'if_seq_no', - ifPrimaryTerm: 'if_primary_term', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a update request - * Updates a document with a script or partial document. 
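`id`, `index`, and `body` are all validated below. A scripted-update sketch with an optimistic-concurrency retry option; the index, id, and script are illustrative:

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

client.update({
  index: 'my-index',
  id: '1',
  retry_on_conflict: 3,   // retryOnConflict is also accepted and snake_cased
  body: {
    script: {
      source: 'ctx._source.count += params.n',
      params: { n: 1 }
    }
  }
}).then(r => console.log(r.body.result), console.error)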
- * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html - */ - return function update (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - if (params['body'] == null) { - const err = new ConfigurationError('Missing required parameter: body') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, index, type, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((index) != null && (type) != null && (id) != null) { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + encodeURIComponent(type) + '/' + encodeURIComponent(id) + '/' + '_update' - } else { - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_update' + '/' + encodeURIComponent(id) - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildUpdate diff --git a/api/api/update_by_query.js b/api/api/update_by_query.js deleted file mode 100644 index 61cf88c16..000000000 --- a/api/api/update_by_query.js +++ /dev/null @@ -1,139 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildUpdateByQuery (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'analyzer', - 'analyze_wildcard', - 'default_operator', - 'df', - 'from', - 'ignore_unavailable', - 'allow_no_indices', - 'conflicts', - 'expand_wildcards', - 'lenient', - 'pipeline', - 'preference', - 'q', - 'routing', - 'scroll', - 'search_type', - 'search_timeout', - 'max_docs', - 'sort', - '_source', - '_source_excludes', - '_source_exclude', - '_source_includes', - '_source_include', - 'terminate_after', - 'stats', - 'version', - 'version_type', - 'request_cache', - 'refresh', - 'timeout', - 'wait_for_active_shards', - 'scroll_size', - 'wait_for_completion', - 'requests_per_second', - 'slices', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - analyzeWildcard: 'analyze_wildcard', - defaultOperator: 'default_operator', - ignoreUnavailable: 'ignore_unavailable', - allowNoIndices: 'allow_no_indices', - expandWildcards: 'expand_wildcards', - searchType: 'search_type', - searchTimeout: 'search_timeout', - maxDocs: 'max_docs', - _sourceExcludes: '_source_excludes', - _sourceExclude: '_source_exclude', - _sourceIncludes: '_source_includes', - _sourceInclude: '_source_include', - terminateAfter: 'terminate_after', - versionType: 'version_type', - requestCache: 'request_cache', - waitForActiveShards: 'wait_for_active_shards', - scrollSize: 'scroll_size', - waitForCompletion: 'wait_for_completion', - requestsPerSecond: 'requests_per_second', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a update_by_query request - * Performs an update on every document in the index without changing the source, -for example to pick up a mapping change. - * https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html - */ - return function updateByQuery (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['index'] == null) { - const err = new ConfigurationError('Missing required parameter: index') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, index, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + encodeURIComponent(index) + '/' + '_update_by_query' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildUpdateByQuery diff --git a/api/api/update_by_query_rethrottle.js b/api/api/update_by_query_rethrottle.js deleted file mode 100644 index d34a0fc2a..000000000 --- a/api/api/update_by_query_rethrottle.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildUpdateByQueryRethrottle (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'requests_per_second', - 'pretty', - 'human', - 'error_trace', - 'source', - 'filter_path' - ] - - const snakeCase = { - requestsPerSecond: 'requests_per_second', - errorTrace: 'error_trace', - filterPath: 'filter_path' - } - - /** - * Perform a update_by_query_rethrottle request - * Changes the number of requests per second for a particular Update By Query operation. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html - */ - return function updateByQueryRethrottle (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['task_id'] == null && params['taskId'] == null) { - const err = new ConfigurationError('Missing required parameter: task_id or taskId') - return handleError(err, callback) - } - if (params['requests_per_second'] == null && params['requestsPerSecond'] == null) { - const err = new ConfigurationError('Missing required parameter: requests_per_second or requestsPerSecond') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, taskId, task_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_update_by_query' + '/' + encodeURIComponent(task_id || taskId) + '/' + '_rethrottle' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildUpdateByQueryRethrottle diff --git a/api/api/watcher.ack_watch.js b/api/api/watcher.ack_watch.js deleted file mode 100644 index ea399d928..000000000 --- a/api/api/watcher.ack_watch.js +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherAckWatch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a watcher.ack_watch request - * Acknowledges a watch, manually throttling the execution of the watch's actions. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html - */ - return function watcherAckWatch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['watch_id'] == null && params['watchId'] == null) { - const err = new ConfigurationError('Missing required parameter: watch_id or watchId') - return handleError(err, callback) - } - - // check required url components - if ((params['action_id'] != null || params['actionId'] != null) && ((params['watch_id'] == null && params['watchId'] == null))) { - const err = new ConfigurationError('Missing required parameter of the url: watch_id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, watchId, watch_id, actionId, action_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((watch_id || watchId) != null && (action_id || actionId) != null) { - if (method == null) method = 'PUT' - path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(watch_id || watchId) + '/' + '_ack' + '/' + encodeURIComponent(action_id || actionId) - } else { - if (method == null) method = 'PUT' - path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(watch_id || watchId) + '/' + '_ack' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherAckWatch diff --git a/api/api/watcher.activate_watch.js b/api/api/watcher.activate_watch.js deleted file mode 100644 index f78edec85..000000000 --- a/api/api/watcher.activate_watch.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherActivateWatch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a watcher.activate_watch request - * Activates a currently inactive watch. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html - */ - return function watcherActivateWatch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['watch_id'] == null && params['watchId'] == null) { - const err = new ConfigurationError('Missing required parameter: watch_id or watchId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, watchId, watch_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(watch_id || watchId) + '/' + '_activate' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherActivateWatch diff --git a/api/api/watcher.deactivate_watch.js b/api/api/watcher.deactivate_watch.js deleted file mode 100644 index add97ef75..000000000 --- a/api/api/watcher.deactivate_watch.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherDeactivateWatch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a watcher.deactivate_watch request - * Deactivates a currently active watch. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html - */ - return function watcherDeactivateWatch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['watch_id'] == null && params['watchId'] == null) { - const err = new ConfigurationError('Missing required parameter: watch_id or watchId') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, watchId, watch_id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(watch_id || watchId) + '/' + '_deactivate' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherDeactivateWatch diff --git a/api/api/watcher.delete_watch.js b/api/api/watcher.delete_watch.js deleted file mode 100644 index 544480b14..000000000 --- a/api/api/watcher.delete_watch.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherDeleteWatch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a watcher.delete_watch request - * Removes a watch from Watcher. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html - */ - return function watcherDeleteWatch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'DELETE' - path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherDeleteWatch diff --git a/api/api/watcher.execute_watch.js b/api/api/watcher.execute_watch.js deleted file mode 100644 index 62bf13a11..000000000 --- a/api/api/watcher.execute_watch.js +++ /dev/null @@ -1,77 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherExecuteWatch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'debug' - ] - - const snakeCase = { - - } - - /** - * Perform a watcher.execute_watch request - * Forces the execution of a stored watch. 
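`id` is optional for this API, so the path logic below can execute either a stored watch or an inline one, and `debug` is the only accepted querystring key. A sketch forcing a stored watch to run (the watch id and body flags are illustrative):

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

client.watcher.executeWatch({
  id: 'error-alert',
  body: {
    ignore_condition: true,  // run the actions even if the watch condition is false
    record_execution: true   // persist the result in the watch history
  }
}).then(r => console.log(r.body.watch_record.state), console.error)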
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html - */ - return function watcherExecuteWatch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((id) != null) { - if (method == null) method = 'PUT' - path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(id) + '/' + '_execute' - } else { - if (method == null) method = 'PUT' - path = '/' + '_watcher' + '/' + 'watch' + '/' + '_execute' - } - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherExecuteWatch diff --git a/api/api/watcher.get_watch.js b/api/api/watcher.get_watch.js deleted file mode 100644 index 61e693964..000000000 --- a/api/api/watcher.get_watch.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherGetWatch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a watcher.get_watch request - * Retrieves a watch by its ID. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html - */ - return function watcherGetWatch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherGetWatch diff --git a/api/api/watcher.put_watch.js b/api/api/watcher.put_watch.js deleted file mode 100644 index 1a60b2d4b..000000000 --- a/api/api/watcher.put_watch.js +++ /dev/null @@ -1,82 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherPutWatch (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'active', - 'version', - 'if_seq_no', - 'if_primary_term' - ] - - const snakeCase = { - ifSeqNo: 'if_seq_no', - ifPrimaryTerm: 'if_primary_term' - } - - /** - * Perform a watcher.put_watch request - * Creates a new watch, or updates an existing one. 
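A sketch registering a watch in an inactive state; `active` is one of the accepted querystring keys below, and the schedule, search input, condition, and logging action are illustrative Watcher DSL, not prescribed by this client:

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

client.watcher.putWatch({
  id: 'error-alert',
  active: false,   // register the watch without scheduling it yet
  body: {
    trigger: { schedule: { interval: '10m' } },
    input: { search: { request: { indices: ['logs-*'], body: { query: { match: { level: 'error' } } } } } },
    condition: { compare: { 'ctx.payload.hits.total': { gt: 0 } } },
    actions: { log_hits: { logging: { text: 'errors found' } } }
  }
}).then(r => console.log(r.body.created), console.error)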
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html - */ - return function watcherPutWatch (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // check required parameters - if (params['id'] == null) { - const err = new ConfigurationError('Missing required parameter: id') - return handleError(err, callback) - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, id, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'PUT' - path = '/' + '_watcher' + '/' + 'watch' + '/' + encodeURIComponent(id) - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherPutWatch diff --git a/api/api/watcher.start.js b/api/api/watcher.start.js deleted file mode 100644 index c9e3b9047..000000000 --- a/api/api/watcher.start.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherStart (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a watcher.start request - * Starts Watcher if it is not already running. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html - */ - return function watcherStart (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_watcher' + '/' + '_start' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? 
null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherStart diff --git a/api/api/watcher.stats.js b/api/api/watcher.stats.js deleted file mode 100644 index 647b733f6..000000000 --- a/api/api/watcher.stats.js +++ /dev/null @@ -1,78 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherStats (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'metric', - 'emit_stacktraces' - ] - - const snakeCase = { - emitStacktraces: 'emit_stacktraces' - } - - /** - * Perform a watcher.stats request - * Retrieves the current Watcher metrics. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html - */ - return function watcherStats (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, metric, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if ((metric) != null) { - if (method == null) method = 'GET' - path = '/' + '_watcher' + '/' + 'stats' + '/' + encodeURIComponent(metric) - } else { - if (method == null) method = 'GET' - path = '/' + '_watcher' + '/' + 'stats' - } - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherStats diff --git a/api/api/watcher.stop.js b/api/api/watcher.stop.js deleted file mode 100644 index ac5a97d88..000000000 --- a/api/api/watcher.stop.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildWatcherStop (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - - ] - - const snakeCase = { - - } - - /** - * Perform a watcher.stop request - * Stops Watcher if it is running. 
- * https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html - */ - return function watcherStop (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'POST' - path = '/' + '_watcher' + '/' + '_stop' - - // build request object - const request = { - method, - path, - body: body || '', - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildWatcherStop diff --git a/api/api/xpack.info.js b/api/api/xpack.info.js deleted file mode 100644 index 2be59b4a9..000000000 --- a/api/api/xpack.info.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildXpackInfo (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'categories' - ] - - const snakeCase = { - - } - - /** - * Perform a xpack.info request - * Retrieves information about the installed X-Pack features. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html - */ - return function xpackInfo (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_xpack' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildXpackInfo diff --git a/api/api/xpack.usage.js b/api/api/xpack.usage.js deleted file mode 100644 index bebe72ee5..000000000 --- a/api/api/xpack.usage.js +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ -/* eslint no-unused-vars: 0 */ - -function buildXpackUsage (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - 'master_timeout' - ] - - const snakeCase = { - masterTimeout: 'master_timeout' - } - - /** - * Perform a xpack.usage request - * Retrieves usage information about the installed X-Pack features. - * https://www.elastic.co/guide/en/elasticsearch/reference/current/usage-api.html - */ - return function xpackUsage (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(`Headers should be an object, instead got: ${typeof options.headers}`) - return handleError(err, callback) - } - - var warnings = [] - var { method, body, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - var path = '' - - if (method == null) method = 'GET' - path = '/' + '_xpack' + '/' + 'usage' - - // build request object - const request = { - method, - path, - body: null, - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } -} - -module.exports = buildXpackUsage diff --git a/api/index.js b/api/index.js deleted file mode 100644 index 6527e78cf..000000000 --- a/api/index.js +++ /dev/null @@ -1,708 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information
-
-'use strict'
-
-const assert = require('assert')
-
-function ESAPI (opts) {
-  assert(opts.makeRequest, 'Missing makeRequest function')
-  assert(opts.ConfigurationError, 'Missing ConfigurationError class')
-  assert(opts.result, 'Missing default result object')
-
-  const { result } = opts
-  opts.handleError = handleError
-  opts.snakeCaseKeys = snakeCaseKeys
-
-  const apis = {
-    async_search: {
-      delete: lazyLoad('async_search.delete', opts),
-      get: lazyLoad('async_search.get', opts),
-      submit: lazyLoad('async_search.submit', opts)
-    },
-    asyncSearch: {
-      delete: lazyLoad('async_search.delete', opts),
-      get: lazyLoad('async_search.get', opts),
-      submit: lazyLoad('async_search.submit', opts)
-    },
-    autoscaling: {
-      delete_autoscaling_policy: lazyLoad('autoscaling.delete_autoscaling_policy', opts),
-      deleteAutoscalingPolicy: lazyLoad('autoscaling.delete_autoscaling_policy', opts),
-      get_autoscaling_decision: lazyLoad('autoscaling.get_autoscaling_decision', opts),
-      getAutoscalingDecision: lazyLoad('autoscaling.get_autoscaling_decision', opts),
-      get_autoscaling_policy: lazyLoad('autoscaling.get_autoscaling_policy', opts),
-      getAutoscalingPolicy: lazyLoad('autoscaling.get_autoscaling_policy', opts),
-      put_autoscaling_policy: lazyLoad('autoscaling.put_autoscaling_policy', opts),
-      putAutoscalingPolicy: lazyLoad('autoscaling.put_autoscaling_policy', opts)
-    },
-    bulk: lazyLoad('bulk', opts),
-    cat: {
-      aliases: lazyLoad('cat.aliases', opts),
-      allocation: lazyLoad('cat.allocation', opts),
-      count: lazyLoad('cat.count', opts),
-      fielddata: lazyLoad('cat.fielddata', opts),
-      health: lazyLoad('cat.health', opts),
-      help: lazyLoad('cat.help', opts),
-      indices: lazyLoad('cat.indices', opts),
-      master: lazyLoad('cat.master', opts),
-      ml_data_frame_analytics: lazyLoad('cat.ml_data_frame_analytics', opts),
-      mlDataFrameAnalytics: lazyLoad('cat.ml_data_frame_analytics', opts),
-      ml_datafeeds: lazyLoad('cat.ml_datafeeds', opts),
-      mlDatafeeds: lazyLoad('cat.ml_datafeeds', opts),
-      ml_jobs: lazyLoad('cat.ml_jobs', opts),
-      mlJobs: lazyLoad('cat.ml_jobs', opts),
-      ml_trained_models: lazyLoad('cat.ml_trained_models', opts),
-      mlTrainedModels: lazyLoad('cat.ml_trained_models', opts),
-      nodeattrs: lazyLoad('cat.nodeattrs', opts),
-      nodes: lazyLoad('cat.nodes', opts),
-      pending_tasks: lazyLoad('cat.pending_tasks', opts),
-      pendingTasks: lazyLoad('cat.pending_tasks', opts),
-      plugins: lazyLoad('cat.plugins', opts),
-      recovery: lazyLoad('cat.recovery', opts),
-      repositories: lazyLoad('cat.repositories', opts),
-      segments: lazyLoad('cat.segments', opts),
-      shards: lazyLoad('cat.shards', opts),
-      snapshots: lazyLoad('cat.snapshots', opts),
-      tasks: lazyLoad('cat.tasks', opts),
-      templates: lazyLoad('cat.templates', opts),
-      thread_pool: lazyLoad('cat.thread_pool', opts),
-      threadPool: lazyLoad('cat.thread_pool', opts),
-      transforms: lazyLoad('cat.transforms', opts)
-    },
-    ccr: {
-      delete_auto_follow_pattern: lazyLoad('ccr.delete_auto_follow_pattern', opts),
-      deleteAutoFollowPattern: lazyLoad('ccr.delete_auto_follow_pattern', opts),
-      follow: lazyLoad('ccr.follow', opts),
-      follow_info: lazyLoad('ccr.follow_info', opts),
-      followInfo: lazyLoad('ccr.follow_info', opts),
-      follow_stats: lazyLoad('ccr.follow_stats', opts),
-      followStats: lazyLoad('ccr.follow_stats', opts),
-      forget_follower: lazyLoad('ccr.forget_follower', opts),
-      forgetFollower: lazyLoad('ccr.forget_follower', opts),
-      get_auto_follow_pattern: lazyLoad('ccr.get_auto_follow_pattern', opts),
-      getAutoFollowPattern: lazyLoad('ccr.get_auto_follow_pattern', opts),
-      pause_auto_follow_pattern: lazyLoad('ccr.pause_auto_follow_pattern', opts),
-      pauseAutoFollowPattern: lazyLoad('ccr.pause_auto_follow_pattern', opts),
-      pause_follow: lazyLoad('ccr.pause_follow', opts),
-      pauseFollow: lazyLoad('ccr.pause_follow', opts),
-      put_auto_follow_pattern: lazyLoad('ccr.put_auto_follow_pattern', opts),
-      putAutoFollowPattern: lazyLoad('ccr.put_auto_follow_pattern', opts),
-      resume_auto_follow_pattern: lazyLoad('ccr.resume_auto_follow_pattern', opts),
-      resumeAutoFollowPattern: lazyLoad('ccr.resume_auto_follow_pattern', opts),
-      resume_follow: lazyLoad('ccr.resume_follow', opts),
-      resumeFollow: lazyLoad('ccr.resume_follow', opts),
-      stats: lazyLoad('ccr.stats', opts),
-      unfollow: lazyLoad('ccr.unfollow', opts)
-    },
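Worth noting as the map scrolls past: every group exposes each endpoint twice, once under its REST-style snake_case name and once under a camelCase alias, and both keys are built from the same `lazyLoad(file, opts)` call. A minimal sketch of the pattern, with names invented here for illustration:

```ts
// Both spellings resolve to equivalent lazy wrappers around one module;
// Node's require cache ensures the underlying API file is loaded only once
// no matter which spelling is called first.
declare function lazyLoad (file: string, opts: object): (...args: unknown[]) => unknown

const opts = {}
const ccrSketch = {
  pause_follow: lazyLoad('ccr.pause_follow', opts),
  pauseFollow: lazyLoad('ccr.pause_follow', opts)
}
```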
-    clear_scroll: lazyLoad('clear_scroll', opts),
-    clearScroll: lazyLoad('clear_scroll', opts),
-    cluster: {
-      allocation_explain: lazyLoad('cluster.allocation_explain', opts),
-      allocationExplain: lazyLoad('cluster.allocation_explain', opts),
-      delete_component_template: lazyLoad('cluster.delete_component_template', opts),
-      deleteComponentTemplate: lazyLoad('cluster.delete_component_template', opts),
-      delete_voting_config_exclusions: lazyLoad('cluster.delete_voting_config_exclusions', opts),
-      deleteVotingConfigExclusions: lazyLoad('cluster.delete_voting_config_exclusions', opts),
-      exists_component_template: lazyLoad('cluster.exists_component_template', opts),
-      existsComponentTemplate: lazyLoad('cluster.exists_component_template', opts),
-      get_component_template: lazyLoad('cluster.get_component_template', opts),
-      getComponentTemplate: lazyLoad('cluster.get_component_template', opts),
-      get_settings: lazyLoad('cluster.get_settings', opts),
-      getSettings: lazyLoad('cluster.get_settings', opts),
-      health: lazyLoad('cluster.health', opts),
-      pending_tasks: lazyLoad('cluster.pending_tasks', opts),
-      pendingTasks: lazyLoad('cluster.pending_tasks', opts),
-      post_voting_config_exclusions: lazyLoad('cluster.post_voting_config_exclusions', opts),
-      postVotingConfigExclusions: lazyLoad('cluster.post_voting_config_exclusions', opts),
-      put_component_template: lazyLoad('cluster.put_component_template', opts),
-      putComponentTemplate: lazyLoad('cluster.put_component_template', opts),
-      put_settings: lazyLoad('cluster.put_settings', opts),
-      putSettings: lazyLoad('cluster.put_settings', opts),
-      remote_info: lazyLoad('cluster.remote_info', opts),
-      remoteInfo: lazyLoad('cluster.remote_info', opts),
-      reroute: lazyLoad('cluster.reroute', opts),
-      state: lazyLoad('cluster.state', opts),
-      stats: lazyLoad('cluster.stats', opts)
-    },
-    count: lazyLoad('count', opts),
-    create: lazyLoad('create', opts),
-    data_frame_transform_deprecated: {
-      delete_transform: lazyLoad('data_frame_transform_deprecated.delete_transform', opts),
-      deleteTransform: lazyLoad('data_frame_transform_deprecated.delete_transform', opts),
-      get_transform: lazyLoad('data_frame_transform_deprecated.get_transform', opts),
-      getTransform: lazyLoad('data_frame_transform_deprecated.get_transform', opts),
-      get_transform_stats: lazyLoad('data_frame_transform_deprecated.get_transform_stats', opts),
-      getTransformStats: lazyLoad('data_frame_transform_deprecated.get_transform_stats', opts),
-      preview_transform: lazyLoad('data_frame_transform_deprecated.preview_transform', opts),
-      previewTransform: lazyLoad('data_frame_transform_deprecated.preview_transform', opts),
-      put_transform: lazyLoad('data_frame_transform_deprecated.put_transform', opts),
-      putTransform: lazyLoad('data_frame_transform_deprecated.put_transform', opts),
-      start_transform: lazyLoad('data_frame_transform_deprecated.start_transform', opts),
-      startTransform: lazyLoad('data_frame_transform_deprecated.start_transform', opts),
-      stop_transform: lazyLoad('data_frame_transform_deprecated.stop_transform', opts),
-      stopTransform: lazyLoad('data_frame_transform_deprecated.stop_transform', opts),
-      update_transform: lazyLoad('data_frame_transform_deprecated.update_transform', opts),
-      updateTransform: lazyLoad('data_frame_transform_deprecated.update_transform', opts)
-    },
-    dataFrameTransformDeprecated: {
-      delete_transform: lazyLoad('data_frame_transform_deprecated.delete_transform', opts),
-      deleteTransform: lazyLoad('data_frame_transform_deprecated.delete_transform', opts),
-      get_transform: lazyLoad('data_frame_transform_deprecated.get_transform', opts),
-      getTransform: lazyLoad('data_frame_transform_deprecated.get_transform', opts),
-      get_transform_stats: lazyLoad('data_frame_transform_deprecated.get_transform_stats', opts),
-      getTransformStats: lazyLoad('data_frame_transform_deprecated.get_transform_stats', opts),
-      preview_transform: lazyLoad('data_frame_transform_deprecated.preview_transform', opts),
-      previewTransform: lazyLoad('data_frame_transform_deprecated.preview_transform', opts),
-      put_transform: lazyLoad('data_frame_transform_deprecated.put_transform', opts),
-      putTransform: lazyLoad('data_frame_transform_deprecated.put_transform', opts),
-      start_transform: lazyLoad('data_frame_transform_deprecated.start_transform', opts),
-      startTransform: lazyLoad('data_frame_transform_deprecated.start_transform', opts),
-      stop_transform: lazyLoad('data_frame_transform_deprecated.stop_transform', opts),
-      stopTransform: lazyLoad('data_frame_transform_deprecated.stop_transform', opts),
-      update_transform: lazyLoad('data_frame_transform_deprecated.update_transform', opts),
-      updateTransform: lazyLoad('data_frame_transform_deprecated.update_transform', opts)
-    },
-    delete: lazyLoad('delete', opts),
-    delete_by_query: lazyLoad('delete_by_query', opts),
-    deleteByQuery: lazyLoad('delete_by_query', opts),
-    delete_by_query_rethrottle: lazyLoad('delete_by_query_rethrottle', opts),
-    deleteByQueryRethrottle: lazyLoad('delete_by_query_rethrottle', opts),
-    delete_script: lazyLoad('delete_script', opts),
-    deleteScript: lazyLoad('delete_script', opts),
-    enrich: {
-      delete_policy: lazyLoad('enrich.delete_policy', opts),
-      deletePolicy: lazyLoad('enrich.delete_policy', opts),
-      execute_policy: lazyLoad('enrich.execute_policy', opts),
-      executePolicy: lazyLoad('enrich.execute_policy', opts),
-      get_policy: lazyLoad('enrich.get_policy', opts),
-      getPolicy: lazyLoad('enrich.get_policy', opts),
-      put_policy: lazyLoad('enrich.put_policy', opts),
-      putPolicy: lazyLoad('enrich.put_policy', opts),
-      stats: lazyLoad('enrich.stats', opts)
-    },
-    eql: {
-      search: lazyLoad('eql.search', opts)
-    },
-    exists: lazyLoad('exists', opts),
-    exists_source: lazyLoad('exists_source', opts),
-    existsSource: lazyLoad('exists_source', opts),
-    explain: lazyLoad('explain', opts),
-    field_caps: lazyLoad('field_caps', opts),
-    fieldCaps: lazyLoad('field_caps', opts),
-    get: lazyLoad('get', opts),
-    get_script: lazyLoad('get_script', opts),
-    getScript: lazyLoad('get_script', opts),
-    get_script_context: lazyLoad('get_script_context', opts),
-    getScriptContext: lazyLoad('get_script_context', opts),
-    get_script_languages: lazyLoad('get_script_languages', opts),
-    getScriptLanguages: lazyLoad('get_script_languages', opts),
-    get_source: lazyLoad('get_source', opts),
-    getSource: lazyLoad('get_source', opts),
-    graph: {
-      explore: lazyLoad('graph.explore', opts)
-    },
-    ilm: {
-      delete_lifecycle: lazyLoad('ilm.delete_lifecycle', opts),
-      deleteLifecycle: lazyLoad('ilm.delete_lifecycle', opts),
-      explain_lifecycle: lazyLoad('ilm.explain_lifecycle', opts),
-      explainLifecycle: lazyLoad('ilm.explain_lifecycle', opts),
-      get_lifecycle: lazyLoad('ilm.get_lifecycle', opts),
-      getLifecycle: lazyLoad('ilm.get_lifecycle', opts),
-      get_status: lazyLoad('ilm.get_status', opts),
-      getStatus: lazyLoad('ilm.get_status', opts),
-      move_to_step: lazyLoad('ilm.move_to_step', opts),
-      moveToStep: lazyLoad('ilm.move_to_step', opts),
-      put_lifecycle: lazyLoad('ilm.put_lifecycle', opts),
-      putLifecycle: lazyLoad('ilm.put_lifecycle', opts),
-      remove_policy: lazyLoad('ilm.remove_policy', opts),
-      removePolicy: lazyLoad('ilm.remove_policy', opts),
-      retry: lazyLoad('ilm.retry', opts),
-      start: lazyLoad('ilm.start', opts),
-      stop: lazyLoad('ilm.stop', opts)
-    },
-    index: lazyLoad('index', opts),
-    indices: {
-      analyze: lazyLoad('indices.analyze', opts),
-      clear_cache: lazyLoad('indices.clear_cache', opts),
-      clearCache: lazyLoad('indices.clear_cache', opts),
-      clone: lazyLoad('indices.clone', opts),
-      close: lazyLoad('indices.close', opts),
-      create: lazyLoad('indices.create', opts),
-      create_data_stream: lazyLoad('indices.create_data_stream', opts),
-      createDataStream: lazyLoad('indices.create_data_stream', opts),
-      delete: lazyLoad('indices.delete', opts),
-      delete_alias: lazyLoad('indices.delete_alias', opts),
-      deleteAlias: lazyLoad('indices.delete_alias', opts),
-      delete_data_stream: lazyLoad('indices.delete_data_stream', opts),
-      deleteDataStream: lazyLoad('indices.delete_data_stream', opts),
-      delete_index_template: lazyLoad('indices.delete_index_template', opts),
-      deleteIndexTemplate: lazyLoad('indices.delete_index_template', opts),
-      delete_template: lazyLoad('indices.delete_template', opts),
-      deleteTemplate: lazyLoad('indices.delete_template', opts),
-      exists: lazyLoad('indices.exists', opts),
-      exists_alias: lazyLoad('indices.exists_alias', opts),
-      existsAlias: lazyLoad('indices.exists_alias', opts),
-      exists_index_template: lazyLoad('indices.exists_index_template', opts),
-      existsIndexTemplate: lazyLoad('indices.exists_index_template', opts),
-      exists_template: lazyLoad('indices.exists_template', opts),
-      existsTemplate: lazyLoad('indices.exists_template', opts),
-      exists_type: lazyLoad('indices.exists_type', opts),
-      existsType: lazyLoad('indices.exists_type', opts),
-      flush: lazyLoad('indices.flush', opts),
-      forcemerge: lazyLoad('indices.forcemerge', opts),
-      freeze: lazyLoad('indices.freeze', opts),
-      get: lazyLoad('indices.get', opts),
-      get_alias: lazyLoad('indices.get_alias', opts),
-      getAlias: lazyLoad('indices.get_alias', opts),
-      get_data_stream: lazyLoad('indices.get_data_stream', opts),
-      getDataStream: lazyLoad('indices.get_data_stream', opts),
-      get_field_mapping: lazyLoad('indices.get_field_mapping', opts),
-      getFieldMapping: lazyLoad('indices.get_field_mapping', opts),
-      get_index_template: lazyLoad('indices.get_index_template', opts),
-      getIndexTemplate: lazyLoad('indices.get_index_template', opts),
-      get_mapping: lazyLoad('indices.get_mapping', opts),
-      getMapping: lazyLoad('indices.get_mapping', opts),
-      get_settings: lazyLoad('indices.get_settings', opts),
-      getSettings: lazyLoad('indices.get_settings', opts),
-      get_template: lazyLoad('indices.get_template', opts),
-      getTemplate: lazyLoad('indices.get_template', opts),
-      get_upgrade: lazyLoad('indices.get_upgrade', opts),
-      getUpgrade: lazyLoad('indices.get_upgrade', opts),
-      open: lazyLoad('indices.open', opts),
-      put_alias: lazyLoad('indices.put_alias', opts),
-      putAlias: lazyLoad('indices.put_alias', opts),
-      put_index_template: lazyLoad('indices.put_index_template', opts),
-      putIndexTemplate: lazyLoad('indices.put_index_template', opts),
-      put_mapping: lazyLoad('indices.put_mapping', opts),
-      putMapping: lazyLoad('indices.put_mapping', opts),
-      put_settings: lazyLoad('indices.put_settings', opts),
-      putSettings: lazyLoad('indices.put_settings', opts),
-      put_template: lazyLoad('indices.put_template', opts),
-      putTemplate: lazyLoad('indices.put_template', opts),
-      recovery: lazyLoad('indices.recovery', opts),
-      refresh: lazyLoad('indices.refresh', opts),
-      reload_search_analyzers: lazyLoad('indices.reload_search_analyzers', opts),
-      reloadSearchAnalyzers: lazyLoad('indices.reload_search_analyzers', opts),
-      rollover: lazyLoad('indices.rollover', opts),
-      segments: lazyLoad('indices.segments', opts),
-      shard_stores: lazyLoad('indices.shard_stores', opts),
-      shardStores: lazyLoad('indices.shard_stores', opts),
-      shrink: lazyLoad('indices.shrink', opts),
-      simulate_index_template: lazyLoad('indices.simulate_index_template', opts),
-      simulateIndexTemplate: lazyLoad('indices.simulate_index_template', opts),
-      simulate_template: lazyLoad('indices.simulate_template', opts),
-      simulateTemplate: lazyLoad('indices.simulate_template', opts),
-      split: lazyLoad('indices.split', opts),
-      stats: lazyLoad('indices.stats', opts),
-      unfreeze: lazyLoad('indices.unfreeze', opts),
-      update_aliases: lazyLoad('indices.update_aliases', opts),
-      updateAliases: lazyLoad('indices.update_aliases', opts),
-      upgrade: lazyLoad('indices.upgrade', opts),
-      validate_query: lazyLoad('indices.validate_query', opts),
-      validateQuery: lazyLoad('indices.validate_query', opts)
-    },
-    info: lazyLoad('info', opts),
-    ingest: {
-      delete_pipeline: lazyLoad('ingest.delete_pipeline', opts),
-      deletePipeline: lazyLoad('ingest.delete_pipeline', opts),
-      get_pipeline: lazyLoad('ingest.get_pipeline', opts),
-      getPipeline: lazyLoad('ingest.get_pipeline', opts),
-      processor_grok: lazyLoad('ingest.processor_grok', opts),
-      processorGrok: lazyLoad('ingest.processor_grok', opts),
-      put_pipeline: lazyLoad('ingest.put_pipeline', opts),
-      putPipeline: lazyLoad('ingest.put_pipeline', opts),
-      simulate: lazyLoad('ingest.simulate', opts)
-    },
-    license: {
-      delete: lazyLoad('license.delete', opts),
-      get: lazyLoad('license.get', opts),
-      get_basic_status: lazyLoad('license.get_basic_status', opts),
-      getBasicStatus: lazyLoad('license.get_basic_status', opts),
-      get_trial_status: lazyLoad('license.get_trial_status', opts),
-      getTrialStatus: lazyLoad('license.get_trial_status', opts),
-      post: lazyLoad('license.post', opts),
-      post_start_basic: lazyLoad('license.post_start_basic', opts),
-      postStartBasic: lazyLoad('license.post_start_basic', opts),
-      post_start_trial: lazyLoad('license.post_start_trial', opts),
-      postStartTrial: lazyLoad('license.post_start_trial', opts)
-    },
-    mget: lazyLoad('mget', opts),
-    migration: {
-      deprecations: lazyLoad('migration.deprecations', opts)
-    },
-    ml: {
-      close_job: lazyLoad('ml.close_job', opts),
-      closeJob: lazyLoad('ml.close_job', opts),
-      delete_calendar: lazyLoad('ml.delete_calendar', opts),
-      deleteCalendar: lazyLoad('ml.delete_calendar', opts),
-      delete_calendar_event: lazyLoad('ml.delete_calendar_event', opts),
-      deleteCalendarEvent: lazyLoad('ml.delete_calendar_event', opts),
-      delete_calendar_job: lazyLoad('ml.delete_calendar_job', opts),
-      deleteCalendarJob: lazyLoad('ml.delete_calendar_job', opts),
-      delete_data_frame_analytics: lazyLoad('ml.delete_data_frame_analytics', opts),
-      deleteDataFrameAnalytics: lazyLoad('ml.delete_data_frame_analytics', opts),
-      delete_datafeed: lazyLoad('ml.delete_datafeed', opts),
-      deleteDatafeed: lazyLoad('ml.delete_datafeed', opts),
-      delete_expired_data: lazyLoad('ml.delete_expired_data', opts),
-      deleteExpiredData: lazyLoad('ml.delete_expired_data', opts),
-      delete_filter: lazyLoad('ml.delete_filter', opts),
-      deleteFilter: lazyLoad('ml.delete_filter', opts),
-      delete_forecast: lazyLoad('ml.delete_forecast', opts),
-      deleteForecast: lazyLoad('ml.delete_forecast', opts),
-      delete_job: lazyLoad('ml.delete_job', opts),
-      deleteJob: lazyLoad('ml.delete_job', opts),
-      delete_model_snapshot: lazyLoad('ml.delete_model_snapshot', opts),
-      deleteModelSnapshot: lazyLoad('ml.delete_model_snapshot', opts),
-      delete_trained_model: lazyLoad('ml.delete_trained_model', opts),
-      deleteTrainedModel: lazyLoad('ml.delete_trained_model', opts),
-      estimate_model_memory: lazyLoad('ml.estimate_model_memory', opts),
-      estimateModelMemory: lazyLoad('ml.estimate_model_memory', opts),
-      evaluate_data_frame: lazyLoad('ml.evaluate_data_frame', opts),
-      evaluateDataFrame: lazyLoad('ml.evaluate_data_frame', opts),
-      explain_data_frame_analytics: lazyLoad('ml.explain_data_frame_analytics', opts),
-      explainDataFrameAnalytics: lazyLoad('ml.explain_data_frame_analytics', opts),
-      find_file_structure: lazyLoad('ml.find_file_structure', opts),
-      findFileStructure: lazyLoad('ml.find_file_structure', opts),
-      flush_job: lazyLoad('ml.flush_job', opts),
-      flushJob: lazyLoad('ml.flush_job', opts),
-      forecast: lazyLoad('ml.forecast', opts),
-      get_buckets: lazyLoad('ml.get_buckets', opts),
-      getBuckets: lazyLoad('ml.get_buckets', opts),
-      get_calendar_events: lazyLoad('ml.get_calendar_events', opts),
-      getCalendarEvents: lazyLoad('ml.get_calendar_events', opts),
-      get_calendars: lazyLoad('ml.get_calendars', opts),
-      getCalendars: lazyLoad('ml.get_calendars', opts),
-      get_categories: lazyLoad('ml.get_categories', opts),
-      getCategories: lazyLoad('ml.get_categories', opts),
-      get_data_frame_analytics: lazyLoad('ml.get_data_frame_analytics', opts),
-      getDataFrameAnalytics: lazyLoad('ml.get_data_frame_analytics', opts),
-      get_data_frame_analytics_stats: lazyLoad('ml.get_data_frame_analytics_stats', opts),
-      getDataFrameAnalyticsStats: lazyLoad('ml.get_data_frame_analytics_stats', opts),
-      get_datafeed_stats: lazyLoad('ml.get_datafeed_stats', opts),
-      getDatafeedStats: lazyLoad('ml.get_datafeed_stats', opts),
-      get_datafeeds: lazyLoad('ml.get_datafeeds', opts),
-      getDatafeeds: lazyLoad('ml.get_datafeeds', opts),
-      get_filters: lazyLoad('ml.get_filters', opts),
-      getFilters: lazyLoad('ml.get_filters', opts),
-      get_influencers: lazyLoad('ml.get_influencers', opts),
-      getInfluencers: lazyLoad('ml.get_influencers', opts),
-      get_job_stats: lazyLoad('ml.get_job_stats', opts),
-      getJobStats: lazyLoad('ml.get_job_stats', opts),
-      get_jobs: lazyLoad('ml.get_jobs', opts),
-      getJobs: lazyLoad('ml.get_jobs', opts),
-      get_model_snapshots: lazyLoad('ml.get_model_snapshots', opts),
-      getModelSnapshots: lazyLoad('ml.get_model_snapshots', opts),
-      get_overall_buckets: lazyLoad('ml.get_overall_buckets', opts),
-      getOverallBuckets: lazyLoad('ml.get_overall_buckets', opts),
-      get_records: lazyLoad('ml.get_records', opts),
-      getRecords: lazyLoad('ml.get_records', opts),
-      get_trained_models: lazyLoad('ml.get_trained_models', opts),
-      getTrainedModels: lazyLoad('ml.get_trained_models', opts),
-      get_trained_models_stats: lazyLoad('ml.get_trained_models_stats', opts),
-      getTrainedModelsStats: lazyLoad('ml.get_trained_models_stats', opts),
-      info: lazyLoad('ml.info', opts),
-      open_job: lazyLoad('ml.open_job', opts),
-      openJob: lazyLoad('ml.open_job', opts),
-      post_calendar_events: lazyLoad('ml.post_calendar_events', opts),
-      postCalendarEvents: lazyLoad('ml.post_calendar_events', opts),
-      post_data: lazyLoad('ml.post_data', opts),
-      postData: lazyLoad('ml.post_data', opts),
-      preview_datafeed: lazyLoad('ml.preview_datafeed', opts),
-      previewDatafeed: lazyLoad('ml.preview_datafeed', opts),
-      put_calendar: lazyLoad('ml.put_calendar', opts),
-      putCalendar: lazyLoad('ml.put_calendar', opts),
-      put_calendar_job: lazyLoad('ml.put_calendar_job', opts),
-      putCalendarJob: lazyLoad('ml.put_calendar_job', opts),
-      put_data_frame_analytics: lazyLoad('ml.put_data_frame_analytics', opts),
-      putDataFrameAnalytics: lazyLoad('ml.put_data_frame_analytics', opts),
-      put_datafeed: lazyLoad('ml.put_datafeed', opts),
-      putDatafeed: lazyLoad('ml.put_datafeed', opts),
-      put_filter: lazyLoad('ml.put_filter', opts),
-      putFilter: lazyLoad('ml.put_filter', opts),
-      put_job: lazyLoad('ml.put_job', opts),
-      putJob: lazyLoad('ml.put_job', opts),
-      put_trained_model: lazyLoad('ml.put_trained_model', opts),
-      putTrainedModel: lazyLoad('ml.put_trained_model', opts),
-      revert_model_snapshot: lazyLoad('ml.revert_model_snapshot', opts),
-      revertModelSnapshot: lazyLoad('ml.revert_model_snapshot', opts),
-      set_upgrade_mode: lazyLoad('ml.set_upgrade_mode', opts),
-      setUpgradeMode: lazyLoad('ml.set_upgrade_mode', opts),
-      start_data_frame_analytics: lazyLoad('ml.start_data_frame_analytics', opts),
-      startDataFrameAnalytics: lazyLoad('ml.start_data_frame_analytics', opts),
-      start_datafeed: lazyLoad('ml.start_datafeed', opts),
-      startDatafeed: lazyLoad('ml.start_datafeed', opts),
-      stop_data_frame_analytics: lazyLoad('ml.stop_data_frame_analytics', opts),
-      stopDataFrameAnalytics: lazyLoad('ml.stop_data_frame_analytics', opts),
-      stop_datafeed: lazyLoad('ml.stop_datafeed', opts),
-      stopDatafeed: lazyLoad('ml.stop_datafeed', opts),
-      update_datafeed: lazyLoad('ml.update_datafeed', opts),
-      updateDatafeed: lazyLoad('ml.update_datafeed', opts),
-      update_filter: lazyLoad('ml.update_filter', opts),
-      updateFilter: lazyLoad('ml.update_filter', opts),
-      update_job: lazyLoad('ml.update_job', opts),
-      updateJob: lazyLoad('ml.update_job', opts),
-      update_model_snapshot: lazyLoad('ml.update_model_snapshot', opts),
-      updateModelSnapshot: lazyLoad('ml.update_model_snapshot', opts),
-      validate: lazyLoad('ml.validate', opts),
-      validate_detector: lazyLoad('ml.validate_detector', opts),
-      validateDetector: lazyLoad('ml.validate_detector', opts)
-    },
-    monitoring: {
-      bulk: lazyLoad('monitoring.bulk', opts)
-    },
-    msearch: lazyLoad('msearch', opts),
-    msearch_template: lazyLoad('msearch_template', opts),
-    msearchTemplate: lazyLoad('msearch_template', opts),
-    mtermvectors: lazyLoad('mtermvectors', opts),
-    nodes: {
-      hot_threads: lazyLoad('nodes.hot_threads', opts),
-      hotThreads: lazyLoad('nodes.hot_threads', opts),
-      info: lazyLoad('nodes.info', opts),
-      reload_secure_settings: lazyLoad('nodes.reload_secure_settings', opts),
-      reloadSecureSettings: lazyLoad('nodes.reload_secure_settings', opts),
-      stats: lazyLoad('nodes.stats', opts),
-      usage: lazyLoad('nodes.usage', opts)
-    },
-    ping: lazyLoad('ping', opts),
-    put_script: lazyLoad('put_script', opts),
-    putScript: lazyLoad('put_script', opts),
-    rank_eval: lazyLoad('rank_eval', opts),
-    rankEval: lazyLoad('rank_eval', opts),
-    reindex: lazyLoad('reindex', opts),
-    reindex_rethrottle: lazyLoad('reindex_rethrottle', opts),
-    reindexRethrottle: lazyLoad('reindex_rethrottle', opts),
-    render_search_template: lazyLoad('render_search_template', opts),
-    renderSearchTemplate: lazyLoad('render_search_template', opts),
-    rollup: {
-      delete_job: lazyLoad('rollup.delete_job', opts),
-      deleteJob: lazyLoad('rollup.delete_job', opts),
-      get_jobs: lazyLoad('rollup.get_jobs', opts),
-      getJobs: lazyLoad('rollup.get_jobs', opts),
-      get_rollup_caps: lazyLoad('rollup.get_rollup_caps', opts),
-      getRollupCaps: lazyLoad('rollup.get_rollup_caps', opts),
-      get_rollup_index_caps: lazyLoad('rollup.get_rollup_index_caps', opts),
-      getRollupIndexCaps: lazyLoad('rollup.get_rollup_index_caps', opts),
-      put_job: lazyLoad('rollup.put_job', opts),
-      putJob: lazyLoad('rollup.put_job', opts),
-      rollup_search: lazyLoad('rollup.rollup_search', opts),
-      rollupSearch: lazyLoad('rollup.rollup_search', opts),
-      start_job: lazyLoad('rollup.start_job', opts),
-      startJob: lazyLoad('rollup.start_job', opts),
-      stop_job: lazyLoad('rollup.stop_job', opts),
-      stopJob: lazyLoad('rollup.stop_job', opts)
-    },
-    scripts_painless_execute: lazyLoad('scripts_painless_execute', opts),
-    scriptsPainlessExecute: lazyLoad('scripts_painless_execute', opts),
-    scroll: lazyLoad('scroll', opts),
-    search: lazyLoad('search', opts),
-    search_shards: lazyLoad('search_shards', opts),
-    searchShards: lazyLoad('search_shards', opts),
-    search_template: lazyLoad('search_template', opts),
-    searchTemplate: lazyLoad('search_template', opts),
-    searchable_snapshots: {
-      clear_cache: lazyLoad('searchable_snapshots.clear_cache', opts),
-      clearCache: lazyLoad('searchable_snapshots.clear_cache', opts),
-      mount: lazyLoad('searchable_snapshots.mount', opts),
-      repository_stats: lazyLoad('searchable_snapshots.repository_stats', opts),
-      repositoryStats: lazyLoad('searchable_snapshots.repository_stats', opts),
-      stats: lazyLoad('searchable_snapshots.stats', opts)
-    },
-    searchableSnapshots: {
-      clear_cache: lazyLoad('searchable_snapshots.clear_cache', opts),
-      clearCache: lazyLoad('searchable_snapshots.clear_cache', opts),
-      mount: lazyLoad('searchable_snapshots.mount', opts),
-      repository_stats: lazyLoad('searchable_snapshots.repository_stats', opts),
-      repositoryStats: lazyLoad('searchable_snapshots.repository_stats', opts),
-      stats: lazyLoad('searchable_snapshots.stats', opts)
-    },
-    security: {
-      authenticate: lazyLoad('security.authenticate', opts),
-      change_password: lazyLoad('security.change_password', opts),
-      changePassword: lazyLoad('security.change_password', opts),
-      clear_cached_realms: lazyLoad('security.clear_cached_realms', opts),
-      clearCachedRealms: lazyLoad('security.clear_cached_realms', opts),
-      clear_cached_roles: lazyLoad('security.clear_cached_roles', opts),
-      clearCachedRoles: lazyLoad('security.clear_cached_roles', opts),
-      create_api_key: lazyLoad('security.create_api_key', opts),
-      createApiKey: lazyLoad('security.create_api_key', opts),
-      delete_privileges: lazyLoad('security.delete_privileges', opts),
-      deletePrivileges: lazyLoad('security.delete_privileges', opts),
-      delete_role: lazyLoad('security.delete_role', opts),
-      deleteRole: lazyLoad('security.delete_role', opts),
-      delete_role_mapping: lazyLoad('security.delete_role_mapping', opts),
-      deleteRoleMapping: lazyLoad('security.delete_role_mapping', opts),
-      delete_user: lazyLoad('security.delete_user', opts),
-      deleteUser: lazyLoad('security.delete_user', opts),
-      disable_user: lazyLoad('security.disable_user', opts),
-      disableUser: lazyLoad('security.disable_user', opts),
-      enable_user: lazyLoad('security.enable_user', opts),
-      enableUser: lazyLoad('security.enable_user', opts),
-      get_api_key: lazyLoad('security.get_api_key', opts),
-      getApiKey: lazyLoad('security.get_api_key', opts),
-      get_builtin_privileges: lazyLoad('security.get_builtin_privileges', opts),
-      getBuiltinPrivileges: lazyLoad('security.get_builtin_privileges', opts),
-      get_privileges: lazyLoad('security.get_privileges', opts),
-      getPrivileges: lazyLoad('security.get_privileges', opts),
-      get_role: lazyLoad('security.get_role', opts),
-      getRole: lazyLoad('security.get_role', opts),
-      get_role_mapping: lazyLoad('security.get_role_mapping', opts),
-      getRoleMapping: lazyLoad('security.get_role_mapping', opts),
-      get_token: lazyLoad('security.get_token', opts),
-      getToken: lazyLoad('security.get_token', opts),
-      get_user: lazyLoad('security.get_user', opts),
-      getUser: lazyLoad('security.get_user', opts),
-      get_user_privileges: lazyLoad('security.get_user_privileges', opts),
-      getUserPrivileges: lazyLoad('security.get_user_privileges', opts),
-      has_privileges: lazyLoad('security.has_privileges', opts),
-      hasPrivileges: lazyLoad('security.has_privileges', opts),
-      invalidate_api_key: lazyLoad('security.invalidate_api_key', opts),
-      invalidateApiKey: lazyLoad('security.invalidate_api_key', opts),
-      invalidate_token: lazyLoad('security.invalidate_token', opts),
-      invalidateToken: lazyLoad('security.invalidate_token', opts),
-      put_privileges: lazyLoad('security.put_privileges', opts),
-      putPrivileges: lazyLoad('security.put_privileges', opts),
-      put_role: lazyLoad('security.put_role', opts),
-      putRole: lazyLoad('security.put_role', opts),
-      put_role_mapping: lazyLoad('security.put_role_mapping', opts),
-      putRoleMapping: lazyLoad('security.put_role_mapping', opts),
-      put_user: lazyLoad('security.put_user', opts),
-      putUser: lazyLoad('security.put_user', opts)
-    },
-    slm: {
-      delete_lifecycle: lazyLoad('slm.delete_lifecycle', opts),
-      deleteLifecycle: lazyLoad('slm.delete_lifecycle', opts),
-      execute_lifecycle: lazyLoad('slm.execute_lifecycle', opts),
-      executeLifecycle: lazyLoad('slm.execute_lifecycle', opts),
-      execute_retention: lazyLoad('slm.execute_retention', opts),
-      executeRetention: lazyLoad('slm.execute_retention', opts),
-      get_lifecycle: lazyLoad('slm.get_lifecycle', opts),
-      getLifecycle: lazyLoad('slm.get_lifecycle', opts),
-      get_stats: lazyLoad('slm.get_stats', opts),
-      getStats: lazyLoad('slm.get_stats', opts),
-      get_status: lazyLoad('slm.get_status', opts),
-      getStatus: lazyLoad('slm.get_status', opts),
-      put_lifecycle: lazyLoad('slm.put_lifecycle', opts),
-      putLifecycle: lazyLoad('slm.put_lifecycle', opts),
-      start: lazyLoad('slm.start', opts),
-      stop: lazyLoad('slm.stop', opts)
-    },
-    snapshot: {
-      cleanup_repository: lazyLoad('snapshot.cleanup_repository', opts),
-      cleanupRepository: lazyLoad('snapshot.cleanup_repository', opts),
-      create: lazyLoad('snapshot.create', opts),
-      create_repository: lazyLoad('snapshot.create_repository', opts),
-      createRepository: lazyLoad('snapshot.create_repository', opts),
-      delete: lazyLoad('snapshot.delete', opts),
-      delete_repository: lazyLoad('snapshot.delete_repository', opts),
-      deleteRepository: lazyLoad('snapshot.delete_repository', opts),
-      get: lazyLoad('snapshot.get', opts),
-      get_repository: lazyLoad('snapshot.get_repository', opts),
-      getRepository: lazyLoad('snapshot.get_repository', opts),
-      restore: lazyLoad('snapshot.restore', opts),
-      status: lazyLoad('snapshot.status', opts),
-      verify_repository: lazyLoad('snapshot.verify_repository', opts),
-      verifyRepository: lazyLoad('snapshot.verify_repository', opts)
-    },
-    sql: {
-      clear_cursor: lazyLoad('sql.clear_cursor', opts),
-      clearCursor: lazyLoad('sql.clear_cursor', opts),
-      query: lazyLoad('sql.query', opts),
-      translate: lazyLoad('sql.translate', opts)
-    },
-    ssl: {
-      certificates: lazyLoad('ssl.certificates', opts)
-    },
-    tasks: {
-      cancel: lazyLoad('tasks.cancel', opts),
-      get: lazyLoad('tasks.get', opts),
-      list: lazyLoad('tasks.list', opts)
-    },
-    termvectors: lazyLoad('termvectors', opts),
-    transform: {
-      delete_transform: lazyLoad('transform.delete_transform', opts),
-      deleteTransform: lazyLoad('transform.delete_transform', opts),
-      get_transform: lazyLoad('transform.get_transform', opts),
-      getTransform: lazyLoad('transform.get_transform', opts),
-      get_transform_stats: lazyLoad('transform.get_transform_stats', opts),
-      getTransformStats: lazyLoad('transform.get_transform_stats', opts),
-      preview_transform: lazyLoad('transform.preview_transform', opts),
-      previewTransform: lazyLoad('transform.preview_transform', opts),
-      put_transform: lazyLoad('transform.put_transform', opts),
-      putTransform: lazyLoad('transform.put_transform', opts),
-      start_transform: lazyLoad('transform.start_transform', opts),
-      startTransform: lazyLoad('transform.start_transform', opts),
-      stop_transform: lazyLoad('transform.stop_transform', opts),
-      stopTransform: lazyLoad('transform.stop_transform', opts),
-      update_transform: lazyLoad('transform.update_transform', opts),
-      updateTransform: lazyLoad('transform.update_transform', opts)
-    },
-    update: lazyLoad('update', opts),
-    update_by_query: lazyLoad('update_by_query', opts),
-    updateByQuery: lazyLoad('update_by_query', opts),
-    update_by_query_rethrottle: lazyLoad('update_by_query_rethrottle', opts),
-    updateByQueryRethrottle: lazyLoad('update_by_query_rethrottle', opts),
-    watcher: {
-      ack_watch: lazyLoad('watcher.ack_watch', opts),
-      ackWatch: lazyLoad('watcher.ack_watch', opts),
-      activate_watch: lazyLoad('watcher.activate_watch', opts),
-      activateWatch: lazyLoad('watcher.activate_watch', opts),
-      deactivate_watch: lazyLoad('watcher.deactivate_watch', opts),
-      deactivateWatch: lazyLoad('watcher.deactivate_watch', opts),
-      delete_watch: lazyLoad('watcher.delete_watch', opts),
-      deleteWatch: lazyLoad('watcher.delete_watch', opts),
-      execute_watch: lazyLoad('watcher.execute_watch', opts),
-      executeWatch: lazyLoad('watcher.execute_watch', opts),
-      get_watch: lazyLoad('watcher.get_watch', opts),
-      getWatch: lazyLoad('watcher.get_watch', opts),
-      put_watch: lazyLoad('watcher.put_watch', opts),
-      putWatch: lazyLoad('watcher.put_watch', opts),
-      start: lazyLoad('watcher.start', opts),
-      stats: lazyLoad('watcher.stats', opts),
-      stop: lazyLoad('watcher.stop', opts)
-    },
-    xpack: {
-      info: lazyLoad('xpack.info', opts),
-      usage: lazyLoad('xpack.usage', opts)
-    }
-  }
-
-  return apis
-
-  function handleError (err, callback) {
-    if (callback) return callback(err, result)
-    return Promise.reject(err)
-  }
-
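The `handleError` helper above is what lets every generated API support both callback and promise call styles from a single code path: with a callback, the error is delivered as the first argument alongside the default `result` object; without one, it becomes a rejected promise. A minimal standalone sketch of the same behavior (names invented here, not the client's public API):

```ts
type Callback = (err: Error | null, result: object) => void

// Mirrors the dual-mode dispatch: callback present → deliver the error to it;
// otherwise hand back a rejection for the caller to `.catch`.
function handleErrorSketch (err: Error, result: object, callback?: Callback): Promise<never> | undefined {
  if (callback) {
    callback(err, result)
    return undefined
  }
  return Promise.reject(err)
}

// Callback style: the error arrives as the first argument.
handleErrorSketch(new Error('Missing required parameter: index'), {}, (err) => {
  if (err) console.error(err.message)
})

// Promise style: the same error surfaces as a rejection.
handleErrorSketch(new Error('Missing required parameter: index'), {})?.catch(err => console.error(err.message))
```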
-  function snakeCaseKeys (acceptedQuerystring, snakeCase, querystring, warnings) {
-    var target = {}
-    var keys = Object.keys(querystring)
-    for (var i = 0, len = keys.length; i < len; i++) {
-      var key = keys[i]
-      target[snakeCase[key] || key] = querystring[key]
-      if (acceptedQuerystring.indexOf(snakeCase[key] || key) === -1) {
-        warnings.push('Client - Unknown parameter: "' + key + '", sending it as query parameter')
-      }
-    }
-    return target
-  }
-}
-
-// It's unlikely that a user needs all of our APIs,
-// and since require is a sync operation that takes time
-// (given the amount of APIs we have), let's lazy load them,
-// so a given API file will be required only
-// if the user actually needs that API.
-// The following implementation takes advantage
-// of js closures to have a simple cache with the least overhead.
-function lazyLoad (file, opts) {
-  var fn = null
-  return function _lazyLoad (params, options, callback) {
-    if (fn === null) {
-      fn = require(`./api/${file}.js`)(opts)
-    }
-    return fn(params, options, callback)
-  }
-}
-
-module.exports = ESAPI
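The comment above `lazyLoad` explains the core trick of this deleted file: `require` is synchronous and this module maps hundreds of endpoints (each under a snake_case and a camelCase key), so each API file is only required on first use, and the closure variable `fn` doubles as the cache. A generic restatement of that closure-cache pattern, as a sketch rather than the client's code:

```ts
// The factory runs at most once, on the first call; the built function is
// cached in the closure variable rather than in a Map or on an object.
function lazyLoadSketch<A extends unknown[], R> (factory: () => (...args: A) => R): (...args: A) => R {
  let fn: ((...args: A) => R) | null = null
  return function _lazyLoad (...args: A): R {
    if (fn === null) {
      fn = factory() // pay the construction cost exactly once
    }
    return fn(...args) // later calls go straight to the cached function
  }
}

// `factory` here stands in for `require('./api/search.js')(opts)`.
const search = lazyLoadSketch(() => (q: string) => `searching for ${q}`)
search('first call builds the function')  // factory runs here
search('second call reuses the cached fn') // factory is skipped
```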
diff --git a/api/requestParams.d.ts b/api/requestParams.d.ts
deleted file mode 100644
index 7e2ad41a6..000000000
--- a/api/requestParams.d.ts
+++ /dev/null
@@ -1,2525 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
-
-import { RequestBody, RequestNDBody } from '../lib/Transport'
-
-export interface Generic {
-  method?: string;
-  ignore?: number | number[];
-  filter_path?: string | string[];
-  pretty?: boolean;
-  human?: boolean;
-  error_trace?: boolean;
-  source?: string;
-}
-
-export interface Bulk<T = RequestNDBody> extends Generic {
-  index?: string;
-  type?: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  wait_for_active_shards?: string;
-  refresh?: 'true' | 'false' | 'wait_for';
-  routing?: string;
-  timeout?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  pipeline?: string;
-  body: T;
-}
-
-export interface CatAliases extends Generic {
-  name?: string | string[];
-  format?: string;
-  local?: boolean;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface CatAllocation extends Generic {
-  node_id?: string | string[];
-  format?: string;
-  bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface CatCount extends Generic {
-  index?: string | string[];
-  format?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface CatFielddata extends Generic {
-  fields?: string | string[];
-  format?: string;
-  bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface CatHealth extends Generic {
-  format?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
-  ts?: boolean;
-  v?: boolean;
-}
-
-export interface CatHelp extends Generic {
-  help?: boolean;
-  s?: string | string[];
-}
-
-export interface CatIndices extends Generic {
-  index?: string | string[];
-  format?: string;
-  bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  health?: 'green' | 'yellow' | 'red';
-  help?: boolean;
-  pri?: boolean;
-  s?: string | string[];
-  time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
-  v?: boolean;
-  include_unloaded_segments?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface CatMaster extends Generic {
-  format?: string;
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface CatNodeattrs extends Generic {
-  format?: string;
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface CatNodes extends Generic {
-  bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
-  format?: string;
-  full_id?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
-  v?: boolean;
-}
-
-export interface CatPendingTasks extends Generic {
-  format?: string;
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
-  v?: boolean;
-}
-
-export interface CatPlugins extends Generic {
-  format?: string;
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface CatRecovery extends Generic {
-  index?: string | string[];
-  format?: string;
-  active_only?: boolean;
-  bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
-  detailed?: boolean;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
-  v?: boolean;
-}
-
-export interface CatRepositories extends Generic {
-  format?: string;
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface CatSegments extends Generic {
-  index?: string | string[];
-  format?: string;
-  bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface CatShards extends Generic {
-  index?: string | string[];
-  format?: string;
-  bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb';
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
-  v?: boolean;
-}
-
-export interface CatSnapshots extends Generic {
-  repository?: string | string[];
-  format?: string;
-  ignore_unavailable?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
-  v?: boolean;
-}
-
-export interface CatTasks extends Generic {
-  format?: string;
-  node_id?: string | string[];
-  actions?: string | string[];
-  detailed?: boolean;
-  parent_task?: number;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
-  v?: boolean;
-}
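These declarations typed the querystring and body of each request, with the shared knobs (`pretty`, `human`, `error_trace`, …) inherited from `Generic`. A hedged sketch of how they were consumed, assuming a local import path for the (since-removed) declarations:

```ts
import { CatAliases } from './requestParams' // hypothetical path to the deleted declaration file

// Unknown keys fail at compile time; string-literal unions constrain
// enum-like parameters such as expand_wildcards.
const params: CatAliases = {
  name: ['alias-1', 'alias-2'],
  format: 'json',
  v: true,
  pretty: true // inherited from Generic
}
```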
-
-export interface CatTemplates extends Generic {
-  name?: string;
-  format?: string;
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface CatThreadPool extends Generic {
-  thread_pool_patterns?: string | string[];
-  format?: string;
-  time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos';
-  local?: boolean;
-  master_timeout?: string;
-  h?: string | string[];
-  help?: boolean;
-  s?: string | string[];
-  v?: boolean;
-}
-
-export interface ClearScroll<T = RequestBody> extends Generic {
-  scroll_id?: string | string[];
-  body?: T;
-}
-
-export interface ClusterAllocationExplain<T = RequestBody> extends Generic {
-  include_yes_decisions?: boolean;
-  include_disk_info?: boolean;
-  body?: T;
-}
-
-export interface ClusterDeleteComponentTemplate extends Generic {
-  name: string;
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface ClusterDeleteVotingConfigExclusions extends Generic {
-  wait_for_removal?: boolean;
-}
-
-export interface ClusterExistsComponentTemplate extends Generic {
-  name: string;
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface ClusterGetComponentTemplate extends Generic {
-  name?: string | string[];
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface ClusterGetSettings extends Generic {
-  flat_settings?: boolean;
-  master_timeout?: string;
-  timeout?: string;
-  include_defaults?: boolean;
-}
-
-export interface ClusterHealth extends Generic {
-  index?: string | string[];
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  level?: 'cluster' | 'indices' | 'shards';
-  local?: boolean;
-  master_timeout?: string;
-  timeout?: string;
-  wait_for_active_shards?: string;
-  wait_for_nodes?: string;
-  wait_for_events?: 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid';
-  wait_for_no_relocating_shards?: boolean;
-  wait_for_no_initializing_shards?: boolean;
-  wait_for_status?: 'green' | 'yellow' | 'red';
-}
-
-export interface ClusterPendingTasks extends Generic {
-  local?: boolean;
-  master_timeout?: string;
-}
-
-export interface ClusterPostVotingConfigExclusions extends Generic {
-  node_ids?: string;
-  node_names?: string;
-  timeout?: string;
-}
-
-export interface ClusterPutComponentTemplate<T = RequestBody> extends Generic {
-  name: string;
-  create?: boolean;
-  timeout?: string;
-  master_timeout?: string;
-  body: T;
-}
-
-export interface ClusterPutSettings<T = RequestBody> extends Generic {
-  flat_settings?: boolean;
-  master_timeout?: string;
-  timeout?: string;
-  body: T;
-}
-
-export interface ClusterRemoteInfo extends Generic {
-}
-
-export interface ClusterReroute<T = RequestBody> extends Generic {
-  dry_run?: boolean;
-  explain?: boolean;
-  retry_failed?: boolean;
-  metric?: string | string[];
-  master_timeout?: string;
-  timeout?: string;
-  body?: T;
-}
-
-export interface ClusterState extends Generic {
-  index?: string | string[];
-  metric?: string | string[];
-  local?: boolean;
-  master_timeout?: string;
-  flat_settings?: boolean;
-  wait_for_metadata_version?: number;
-  wait_for_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface ClusterStats extends Generic {
-  node_id?: string | string[];
-  flat_settings?: boolean;
-  timeout?: string;
-}
-
-export interface Count<T = RequestBody> extends Generic {
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  ignore_throttled?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  min_score?: number;
-  preference?: string;
-  routing?: string | string[];
-  q?: string;
-  analyzer?: string;
-  analyze_wildcard?: boolean;
-  default_operator?: 'AND' | 'OR';
-  df?: string;
-  lenient?: boolean;
-  terminate_after?: number;
-  body?: T;
-}
-
-export interface Create<T = RequestBody> extends Generic {
-  id: string;
-  index: string;
-  type?: string;
-  wait_for_active_shards?: string;
-  refresh?: 'true' | 'false' | 'wait_for';
-  routing?: string;
-  timeout?: string;
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-  pipeline?: string;
-  body: T;
-}
-
-export interface Delete extends Generic {
-  id: string;
-  index: string;
-  type?: string;
-  wait_for_active_shards?: string;
-  refresh?: 'true' | 'false' | 'wait_for';
-  routing?: string;
-  timeout?: string;
-  if_seq_no?: number;
-  if_primary_term?: number;
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface DeleteByQuery<T = RequestBody> extends Generic {
-  index: string | string[];
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  analyzer?: string;
-  analyze_wildcard?: boolean;
-  default_operator?: 'AND' | 'OR';
-  df?: string;
-  from?: number;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  conflicts?: 'abort' | 'proceed';
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  lenient?: boolean;
-  preference?: string;
-  q?: string;
-  routing?: string | string[];
-  scroll?: string;
-  search_type?: 'query_then_fetch' | 'dfs_query_then_fetch';
-  search_timeout?: string;
-  max_docs?: number;
-  sort?: string | string[];
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  terminate_after?: number;
-  stats?: string | string[];
-  version?: boolean;
-  request_cache?: boolean;
-  refresh?: boolean;
-  timeout?: string;
-  wait_for_active_shards?: string;
-  scroll_size?: number;
-  wait_for_completion?: boolean;
-  requests_per_second?: number;
-  slices?: number|string;
-  body: T;
-}
-
-export interface DeleteByQueryRethrottle extends Generic {
-  task_id: string;
-  requests_per_second: number;
-}
-
-export interface DeleteScript extends Generic {
-  id: string;
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface Exists extends Generic {
-  id: string;
-  index: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  stored_fields?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface ExistsSource extends Generic {
-  id: string;
-  index: string;
-  type?: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface Explain<T = RequestBody> extends Generic {
-  id: string;
-  index: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  analyze_wildcard?: boolean;
-  analyzer?: string;
-  default_operator?: 'AND' | 'OR';
-  df?: string;
-  stored_fields?: string | string[];
-  lenient?: boolean;
-  preference?: string;
-  q?: string;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  body?: T;
-}
-
-export interface FieldCaps extends Generic {
-  index?: string | string[];
-  fields?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  include_unmapped?: boolean;
-}
-
-export interface Get extends Generic {
-  id: string;
-  index: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  stored_fields?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface GetScript extends Generic {
-  id: string;
-  master_timeout?: string;
-}
-
-export interface GetScriptContext extends Generic {
-}
-
-export interface GetScriptLanguages extends Generic {
-}
-
-export interface GetSource extends Generic {
-  id: string;
-  index: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-}
-
-export interface Index<T = RequestBody> extends Generic {
-  id?: string;
-  index: string;
-  wait_for_active_shards?: string;
-  op_type?: 'index' | 'create';
-  refresh?: 'true' | 'false' | 'wait_for';
-  routing?: string;
-  timeout?: string;
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-  if_seq_no?: number;
-  if_primary_term?: number;
-  pipeline?: string;
-  body: T;
-}
-
-export interface IndicesAnalyze<T = RequestBody> extends Generic {
-  index?: string;
-  body?: T;
-}
-
-export interface IndicesClearCache extends Generic {
-  index?: string | string[];
-  fielddata?: boolean;
-  fields?: string | string[];
-  query?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  request?: boolean;
-}
-
-export interface IndicesClone<T = RequestBody> extends Generic {
-  index: string;
-  target: string;
-  timeout?: string;
-  master_timeout?: string;
-  wait_for_active_shards?: string;
-  body?: T;
-}
-
-export interface IndicesClose extends Generic {
-  index: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  wait_for_active_shards?: string;
-}
-
-export interface IndicesCreate<T = RequestBody> extends Generic {
-  index: string;
-  wait_for_active_shards?: string;
-  timeout?: string;
-  master_timeout?: string;
-  body?: T;
-}
-
-export interface IndicesCreateDataStream<T = RequestBody> extends Generic {
-  name: string;
-  body?: T;
-}
-
-export interface IndicesDelete extends Generic {
-  index: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesDeleteAlias extends Generic {
-  index: string | string[];
-  name: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface IndicesDeleteDataStream extends Generic {
-  name: string;
-}
-
-export interface IndicesDeleteIndexTemplate extends Generic {
-  name: string;
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface IndicesDeleteTemplate extends Generic {
-  name: string;
-  timeout?: string;
-  master_timeout?: string;
-}
-
-export interface IndicesExists extends Generic {
-  index: string | string[];
-  local?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  flat_settings?: boolean;
-  include_defaults?: boolean;
-}
-
-export interface IndicesExistsAlias extends Generic {
-  name: string | string[];
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  local?: boolean;
-}
-
-export interface IndicesExistsIndexTemplate extends Generic {
-  name: string;
-  flat_settings?: boolean;
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesExistsTemplate extends Generic {
-  name: string | string[];
-  flat_settings?: boolean;
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesExistsType extends Generic {
-  index: string | string[];
-  type: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  local?: boolean;
-}
-
-export interface IndicesFlush extends Generic {
-  index?: string | string[];
-  force?: boolean;
-  wait_if_ongoing?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesForcemerge extends Generic {
-  index?: string | string[];
-  flush?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  max_num_segments?: number;
-  only_expunge_deletes?: boolean;
-}
-
-export interface IndicesGet extends Generic {
-  index: string | string[];
-  local?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  flat_settings?: boolean;
-  include_defaults?: boolean;
-  master_timeout?: string;
-}
-
-export interface IndicesGetAlias extends Generic {
-  name?: string | string[];
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  local?: boolean;
-}
-
-export interface IndicesGetDataStream extends Generic {
-  name?: string;
-}
-
-export interface IndicesGetFieldMapping extends Generic {
-  fields: string | string[];
-  index?: string | string[];
-  include_defaults?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  local?: boolean;
-}
-
-export interface IndicesGetIndexTemplate extends Generic {
-  name?: string | string[];
-  flat_settings?: boolean;
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesGetMapping extends Generic {
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesGetSettings extends Generic {
-  index?: string | string[];
-  name?: string | string[];
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  flat_settings?: boolean;
-  local?: boolean;
-  include_defaults?: boolean;
-}
-
-export interface IndicesGetTemplate extends Generic {
-  name?: string | string[];
-  flat_settings?: boolean;
-  master_timeout?: string;
-  local?: boolean;
-}
-
-export interface IndicesGetUpgrade extends Generic {
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesOpen extends Generic {
-  index: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  wait_for_active_shards?: string;
-}
-
-export interface IndicesPutAlias<T = RequestBody> extends Generic {
-  index: string | string[];
-  name: string;
-  timeout?: string;
-  master_timeout?: string;
-  body?: T;
-}
-
-export interface IndicesPutIndexTemplate<T = RequestBody> extends Generic {
-  name: string;
-  create?: boolean;
-  cause?: string;
-  master_timeout?: string;
-  body: T;
-}
-
-export interface IndicesPutMapping<T = RequestBody> extends Generic {
-  index: string | string[];
-  timeout?: string;
-  master_timeout?: string;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  body: T;
-}
-
-export interface IndicesPutSettings<T = RequestBody> extends Generic {
-  index?: string | string[];
-  master_timeout?: string;
-  timeout?: string;
-  preserve_existing?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  flat_settings?: boolean;
-  body: T;
-}
-
-export interface IndicesPutTemplate<T = RequestBody> extends Generic {
-  name: string;
-  order?: number;
-  create?: boolean;
-  master_timeout?: string;
-  body: T;
-}
-
-export interface IndicesRecovery extends Generic {
-  index?: string | string[];
-  detailed?: boolean;
-  active_only?: boolean;
-}
-
-export interface IndicesRefresh extends Generic {
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesRollover<T = RequestBody> extends Generic {
-  alias: string;
-  new_index?: string;
-  timeout?: string;
-  dry_run?: boolean;
-  master_timeout?: string;
-  wait_for_active_shards?: string;
-  body?: T;
-}
-
-export interface IndicesSegments extends Generic {
-  index?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  verbose?: boolean;
-}
-
-export interface IndicesShardStores extends Generic {
-  index?: string | string[];
-  status?: string | string[];
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-}
-
-export interface IndicesShrink<T = RequestBody> extends Generic {
-  index: string;
-  target: string;
-  timeout?: string;
-  master_timeout?: string;
-  wait_for_active_shards?: string;
-  body?: T;
-}
-
-export interface IndicesSimulateIndexTemplate<T = RequestBody> extends Generic {
-  name: string;
-  create?: boolean;
-  cause?: string;
-  master_timeout?: string;
-  body?: T;
-}
-
-export interface IndicesSimulateTemplate<T = RequestBody> extends Generic {
-  name?: string;
-  create?: boolean;
-  cause?: string;
-  master_timeout?: string;
-  body?: T;
-}
-
-export interface IndicesSplit<T = RequestBody> extends Generic {
-  index: string;
-  target: string;
-  timeout?: string;
-  master_timeout?: string;
-  wait_for_active_shards?: string;
-  body?: T;
-}
-
-export interface IndicesStats extends Generic {
-  metric?: string | string[];
-  index?: string | string[];
-  completion_fields?: string | string[];
-  fielddata_fields?: string | string[];
-  fields?: string | string[];
-  groups?: string | string[];
-  level?: 'cluster' | 'indices' | 'shards';
-  types?: string | string[];
-  include_segment_file_sizes?: boolean;
-  include_unloaded_segments?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  forbid_closed_indices?: boolean;
-}
-
-export interface IndicesUpdateAliases<T = RequestBody> extends Generic {
-  timeout?: string;
-  master_timeout?: string;
-  body: T;
-}
-
-export interface IndicesUpgrade extends Generic {
-  index?: string | string[];
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  ignore_unavailable?: boolean;
-  wait_for_completion?: boolean;
-  only_ancient_segments?: boolean;
-}
-
-export interface IndicesValidateQuery<T = RequestBody> extends Generic {
-  index?: string | string[];
-  type?: string | string[];
-  explain?: boolean;
-  ignore_unavailable?: boolean;
-  allow_no_indices?: boolean;
-  expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all';
-  q?: string;
-  analyzer?: string;
-  analyze_wildcard?: boolean;
-  default_operator?: 'AND' | 'OR';
-  df?: string;
-  lenient?: boolean;
-  rewrite?: boolean;
-  all_shards?: boolean;
-  body?: T;
-}
-
-export interface Info extends Generic {
-}
-
-export interface IngestDeletePipeline extends Generic {
-  id: string;
-  master_timeout?: string;
-  timeout?: string;
-}
-
-export interface IngestGetPipeline extends Generic {
-  id?: string;
-  master_timeout?: string;
-}
-
-export interface IngestProcessorGrok extends Generic {
-}
-
-export interface IngestPutPipeline<T = RequestBody> extends Generic {
-  id: string;
-  master_timeout?: string;
-  timeout?: string;
-  body: T;
-}
-
-export interface IngestSimulate<T = RequestBody> extends Generic {
-  id?: string;
-  verbose?: boolean;
-  body: T;
-}
-
-export interface Mget<T = RequestBody> extends Generic {
-  index?: string;
-  _source_exclude?: string | string[];
-  _source_include?: string | string[];
-  stored_fields?: string | string[];
-  preference?: string;
-  realtime?: boolean;
-  refresh?: boolean;
-  routing?: string;
-  _source?: string | string[];
-  _source_excludes?: string | string[];
-  _source_includes?: string | string[];
-  body: T;
-}
-
-export interface Msearch<T = RequestNDBody> extends Generic {
-  index?: string | string[];
-  search_type?: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch';
-  max_concurrent_searches?: number;
-  typed_keys?: boolean;
-  pre_filter_shard_size?: number;
-  max_concurrent_shard_requests?: number;
-  rest_total_hits_as_int?: boolean;
-  ccs_minimize_roundtrips?: boolean;
-  body: T;
-}
-
-export interface MsearchTemplate<T = RequestNDBody> extends Generic {
-  index?: string | string[];
-  search_type?: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch';
-  typed_keys?: boolean;
-  max_concurrent_searches?: number;
-  rest_total_hits_as_int?: boolean;
-  ccs_minimize_roundtrips?: boolean;
-  body: T;
-}
-
-export interface Mtermvectors<T = RequestBody> extends Generic {
-  index?: string;
-  ids?: string | string[];
-  term_statistics?: boolean;
-  field_statistics?: boolean;
-  fields?: string | string[];
-  offsets?: boolean;
-  positions?: boolean;
-  payloads?: boolean;
-  preference?: string;
-  routing?: string;
-  realtime?: boolean;
-  version?: number;
-  version_type?: 'internal' | 'external' | 'external_gte';
-  body?: T;
-}
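One detail worth calling out in the declarations above: `Msearch` and `MsearchTemplate` default their body to `RequestNDBody` rather than `RequestBody`, because `_msearch` takes newline-delimited JSON, i.e. alternating header/body lines rather than a single object. A hedged sketch of what that typing allowed, again assuming a hypothetical import path:

```ts
import { Msearch } from './requestParams' // hypothetical path

// The ND body is naturally modeled as an array of objects, one per line.
const params: Msearch<Array<Record<string, unknown>>> = {
  index: 'my-index',
  body: [
    {},                                   // header line: search the default index
    { query: { match_all: {} } },         // body line: first search
    { index: 'other-index' },             // header line: override the target index
    { query: { match: { user: 'kim' } } } // body line: second search
  ]
}
```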
ignore_idle_threads?: boolean; - type?: 'cpu' | 'wait' | 'block'; - timeout?: string; -} - -export interface NodesInfo extends Generic { - node_id?: string | string[]; - metric?: string | string[]; - flat_settings?: boolean; - timeout?: string; -} - -export interface NodesReloadSecureSettings extends Generic { - node_id?: string | string[]; - timeout?: string; - body?: T; -} - -export interface NodesStats extends Generic { - node_id?: string | string[]; - metric?: string | string[]; - index_metric?: string | string[]; - completion_fields?: string | string[]; - fielddata_fields?: string | string[]; - fields?: string | string[]; - groups?: boolean; - level?: 'indices' | 'node' | 'shards'; - types?: string | string[]; - timeout?: string; - include_segment_file_sizes?: boolean; -} - -export interface NodesUsage extends Generic { - node_id?: string | string[]; - metric?: string | string[]; - timeout?: string; -} - -export interface Ping extends Generic { -} - -export interface PutScript extends Generic { - id: string; - context?: string; - timeout?: string; - master_timeout?: string; - body: T; -} - -export interface RankEval extends Generic { - index?: string | string[]; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - search_type?: 'query_then_fetch' | 'dfs_query_then_fetch'; - body: T; -} - -export interface Reindex extends Generic { - refresh?: boolean; - timeout?: string; - wait_for_active_shards?: string; - wait_for_completion?: boolean; - requests_per_second?: number; - scroll?: string; - slices?: number|string; - max_docs?: number; - body: T; -} - -export interface ReindexRethrottle extends Generic { - task_id: string; - requests_per_second: number; -} - -export interface RenderSearchTemplate extends Generic { - id?: string; - body?: T; -} - -export interface ScriptsPainlessExecute extends Generic { - body?: T; -} - -export interface Scroll extends Generic { - scroll_id?: string; - scroll?: string; - rest_total_hits_as_int?: boolean; - body?: T; -} - -export interface Search extends Generic { - index?: string | string[]; - _source_exclude?: string | string[]; - _source_include?: string | string[]; - analyzer?: string; - analyze_wildcard?: boolean; - ccs_minimize_roundtrips?: boolean; - default_operator?: 'AND' | 'OR'; - df?: string; - explain?: boolean; - stored_fields?: string | string[]; - docvalue_fields?: string | string[]; - from?: number; - ignore_unavailable?: boolean; - ignore_throttled?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - lenient?: boolean; - preference?: string; - q?: string; - routing?: string | string[]; - scroll?: string; - search_type?: 'query_then_fetch' | 'dfs_query_then_fetch'; - size?: number; - sort?: string | string[]; - _source?: string | string[]; - _source_excludes?: string | string[]; - _source_includes?: string | string[]; - terminate_after?: number; - stats?: string | string[]; - suggest_field?: string; - suggest_mode?: 'missing' | 'popular' | 'always'; - suggest_size?: number; - suggest_text?: string; - timeout?: string; - track_scores?: boolean; - track_total_hits?: boolean | number; - allow_partial_search_results?: boolean; - typed_keys?: boolean; - version?: boolean; - seq_no_primary_term?: boolean; - request_cache?: boolean; - batched_reduce_size?: number; - max_concurrent_shard_requests?: number; - pre_filter_shard_size?: number; - rest_total_hits_as_int?: boolean; - body?: T; -} - -export interface 
SearchShards extends Generic { - index?: string | string[]; - preference?: string; - routing?: string; - local?: boolean; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; -} - -export interface SearchTemplate extends Generic { - index?: string | string[]; - ignore_unavailable?: boolean; - ignore_throttled?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - preference?: string; - routing?: string | string[]; - scroll?: string; - search_type?: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch'; - explain?: boolean; - profile?: boolean; - typed_keys?: boolean; - rest_total_hits_as_int?: boolean; - ccs_minimize_roundtrips?: boolean; - body: T; -} - -export interface SnapshotCleanupRepository extends Generic { - repository: string; - master_timeout?: string; - timeout?: string; -} - -export interface SnapshotCreate extends Generic { - repository: string; - snapshot: string; - master_timeout?: string; - wait_for_completion?: boolean; - body?: T; -} - -export interface SnapshotCreateRepository extends Generic { - repository: string; - master_timeout?: string; - timeout?: string; - verify?: boolean; - body: T; -} - -export interface SnapshotDelete extends Generic { - repository: string; - snapshot: string | string[]; - master_timeout?: string; -} - -export interface SnapshotDeleteRepository extends Generic { - repository: string | string[]; - master_timeout?: string; - timeout?: string; -} - -export interface SnapshotGet extends Generic { - repository: string; - snapshot: string | string[]; - master_timeout?: string; - ignore_unavailable?: boolean; - verbose?: boolean; -} - -export interface SnapshotGetRepository extends Generic { - repository?: string | string[]; - master_timeout?: string; - local?: boolean; -} - -export interface SnapshotRestore extends Generic { - repository: string; - snapshot: string; - master_timeout?: string; - wait_for_completion?: boolean; - body?: T; -} - -export interface SnapshotStatus extends Generic { - repository?: string; - snapshot?: string | string[]; - master_timeout?: string; - ignore_unavailable?: boolean; -} - -export interface SnapshotVerifyRepository extends Generic { - repository: string; - master_timeout?: string; - timeout?: string; -} - -export interface TasksCancel extends Generic { - task_id?: string; - nodes?: string | string[]; - actions?: string | string[]; - parent_task_id?: string; - wait_for_completion?: boolean; -} - -export interface TasksGet extends Generic { - task_id: string; - wait_for_completion?: boolean; - timeout?: string; -} - -export interface TasksList extends Generic { - nodes?: string | string[]; - actions?: string | string[]; - detailed?: boolean; - parent_task_id?: string; - wait_for_completion?: boolean; - group_by?: 'nodes' | 'parents' | 'none'; - timeout?: string; -} - -export interface Termvectors extends Generic { - index: string; - id?: string; - term_statistics?: boolean; - field_statistics?: boolean; - fields?: string | string[]; - offsets?: boolean; - positions?: boolean; - payloads?: boolean; - preference?: string; - routing?: string; - realtime?: boolean; - version?: number; - version_type?: 'internal' | 'external' | 'external_gte'; - body?: T; -} - -export interface Update extends Generic { - id: string; - index: string; - type?: string; - _source_exclude?: string | string[]; - _source_include?: string | string[]; - wait_for_active_shards?: 
string; - _source?: string | string[]; - _source_excludes?: string | string[]; - _source_includes?: string | string[]; - lang?: string; - refresh?: 'true' | 'false' | 'wait_for'; - retry_on_conflict?: number; - routing?: string; - timeout?: string; - if_seq_no?: number; - if_primary_term?: number; - body: T; -} - -export interface UpdateByQuery extends Generic { - index: string | string[]; - _source_exclude?: string | string[]; - _source_include?: string | string[]; - analyzer?: string; - analyze_wildcard?: boolean; - default_operator?: 'AND' | 'OR'; - df?: string; - from?: number; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - conflicts?: 'abort' | 'proceed'; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - lenient?: boolean; - pipeline?: string; - preference?: string; - q?: string; - routing?: string | string[]; - scroll?: string; - search_type?: 'query_then_fetch' | 'dfs_query_then_fetch'; - search_timeout?: string; - max_docs?: number; - sort?: string | string[]; - _source?: string | string[]; - _source_excludes?: string | string[]; - _source_includes?: string | string[]; - terminate_after?: number; - stats?: string | string[]; - version?: boolean; - version_type?: boolean; - request_cache?: boolean; - refresh?: boolean; - timeout?: string; - wait_for_active_shards?: string; - scroll_size?: number; - wait_for_completion?: boolean; - requests_per_second?: number; - slices?: number|string; - body?: T; -} - -export interface UpdateByQueryRethrottle extends Generic { - task_id: string; - requests_per_second: number; -} - -export interface AsyncSearchDelete extends Generic { - id: string; -} - -export interface AsyncSearchGet extends Generic { - id: string; - wait_for_completion_timeout?: string; - keep_alive?: string; - typed_keys?: boolean; -} - -export interface AsyncSearchSubmit extends Generic { - index?: string | string[]; - _source_exclude?: string | string[]; - _source_include?: string | string[]; - wait_for_completion_timeout?: string; - keep_on_completion?: boolean; - keep_alive?: string; - batched_reduce_size?: number; - request_cache?: boolean; - analyzer?: string; - analyze_wildcard?: boolean; - default_operator?: 'AND' | 'OR'; - df?: string; - explain?: boolean; - stored_fields?: string | string[]; - docvalue_fields?: string | string[]; - from?: number; - ignore_unavailable?: boolean; - ignore_throttled?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - lenient?: boolean; - preference?: string; - q?: string; - routing?: string | string[]; - search_type?: 'query_then_fetch' | 'dfs_query_then_fetch'; - size?: number; - sort?: string | string[]; - _source?: string | string[]; - _source_excludes?: string | string[]; - _source_includes?: string | string[]; - terminate_after?: number; - stats?: string | string[]; - suggest_field?: string; - suggest_mode?: 'missing' | 'popular' | 'always'; - suggest_size?: number; - suggest_text?: string; - timeout?: string; - track_scores?: boolean; - track_total_hits?: boolean | number; - allow_partial_search_results?: boolean; - typed_keys?: boolean; - version?: boolean; - seq_no_primary_term?: boolean; - max_concurrent_shard_requests?: number; - body?: T; -} - -export interface AutoscalingDeleteAutoscalingPolicy extends Generic { - name: string; -} - -export interface AutoscalingGetAutoscalingDecision extends Generic { -} - -export interface AutoscalingGetAutoscalingPolicy extends Generic { - name: string; -} - -export interface 
AutoscalingPutAutoscalingPolicy extends Generic { - name: string; - body: T; -} - -export interface CatMlDataFrameAnalytics extends Generic { - id?: string; - allow_no_match?: boolean; - bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb'; - format?: string; - h?: string | string[]; - help?: boolean; - s?: string | string[]; - time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos'; - v?: boolean; -} - -export interface CatMlDatafeeds extends Generic { - datafeed_id?: string; - allow_no_datafeeds?: boolean; - format?: string; - h?: string | string[]; - help?: boolean; - s?: string | string[]; - time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos'; - v?: boolean; -} - -export interface CatMlJobs extends Generic { - job_id?: string; - allow_no_jobs?: boolean; - bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb'; - format?: string; - h?: string | string[]; - help?: boolean; - s?: string | string[]; - time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos'; - v?: boolean; -} - -export interface CatMlTrainedModels extends Generic { - model_id?: string; - allow_no_match?: boolean; - from?: number; - size?: number; - bytes?: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb'; - format?: string; - h?: string | string[]; - help?: boolean; - s?: string | string[]; - time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos'; - v?: boolean; -} - -export interface CatTransforms extends Generic { - transform_id?: string; - from?: number; - size?: number; - allow_no_match?: boolean; - format?: string; - h?: string | string[]; - help?: boolean; - s?: string | string[]; - time?: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos'; - v?: boolean; -} - -export interface CcrDeleteAutoFollowPattern extends Generic { - name: string; -} - -export interface CcrFollow extends Generic { - index: string; - wait_for_active_shards?: string; - body: T; -} - -export interface CcrFollowInfo extends Generic { - index: string | string[]; -} - -export interface CcrFollowStats extends Generic { - index: string | string[]; -} - -export interface CcrForgetFollower extends Generic { - index: string; - body: T; -} - -export interface CcrGetAutoFollowPattern extends Generic { - name?: string; -} - -export interface CcrPauseAutoFollowPattern extends Generic { - name: string; -} - -export interface CcrPauseFollow extends Generic { - index: string; -} - -export interface CcrPutAutoFollowPattern extends Generic { - name: string; - body: T; -} - -export interface CcrResumeAutoFollowPattern extends Generic { - name: string; -} - -export interface CcrResumeFollow extends Generic { - index: string; - body?: T; -} - -export interface CcrStats extends Generic { -} - -export interface CcrUnfollow extends Generic { - index: string; -} - -export interface DataFrameTransformDeprecatedDeleteTransform extends Generic { - transform_id: string; - force?: boolean; -} - -export interface DataFrameTransformDeprecatedGetTransform extends Generic { - transform_id?: string; - from?: number; - size?: number; - allow_no_match?: boolean; -} - -export interface DataFrameTransformDeprecatedGetTransformStats extends Generic { - transform_id: string; - from?: number; - size?: number; - allow_no_match?: boolean; -} - -export interface DataFrameTransformDeprecatedPreviewTransform extends Generic { - body: T; -} - -export interface DataFrameTransformDeprecatedPutTransform extends Generic { - transform_id: string; - defer_validation?: boolean; - body: T; -} - -export interface 
DataFrameTransformDeprecatedStartTransform extends Generic { - transform_id: string; - timeout?: string; -} - -export interface DataFrameTransformDeprecatedStopTransform extends Generic { - transform_id: string; - wait_for_completion?: boolean; - timeout?: string; - allow_no_match?: boolean; -} - -export interface DataFrameTransformDeprecatedUpdateTransform extends Generic { - transform_id: string; - defer_validation?: boolean; - body: T; -} - -export interface EnrichDeletePolicy extends Generic { - name: string; -} - -export interface EnrichExecutePolicy extends Generic { - name: string; - wait_for_completion?: boolean; -} - -export interface EnrichGetPolicy extends Generic { - name?: string | string[]; -} - -export interface EnrichPutPolicy extends Generic { - name: string; - body: T; -} - -export interface EnrichStats extends Generic { -} - -export interface EqlSearch extends Generic { - index: string; - body: T; -} - -export interface GraphExplore extends Generic { - index: string | string[]; - routing?: string; - timeout?: string; - body?: T; -} - -export interface IlmDeleteLifecycle extends Generic { - policy: string; -} - -export interface IlmExplainLifecycle extends Generic { - index: string; - only_managed?: boolean; - only_errors?: boolean; -} - -export interface IlmGetLifecycle extends Generic { - policy?: string; -} - -export interface IlmGetStatus extends Generic { -} - -export interface IlmMoveToStep extends Generic { - index: string; - body?: T; -} - -export interface IlmPutLifecycle extends Generic { - policy: string; - body?: T; -} - -export interface IlmRemovePolicy extends Generic { - index: string; -} - -export interface IlmRetry extends Generic { - index: string; -} - -export interface IlmStart extends Generic { -} - -export interface IlmStop extends Generic { -} - -export interface IndicesFreeze extends Generic { - index: string; - timeout?: string; - master_timeout?: string; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - wait_for_active_shards?: string; -} - -export interface IndicesReloadSearchAnalyzers extends Generic { - index: string | string[]; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; -} - -export interface IndicesUnfreeze extends Generic { - index: string; - timeout?: string; - master_timeout?: string; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - wait_for_active_shards?: string; -} - -export interface LicenseDelete extends Generic { -} - -export interface LicenseGet extends Generic { - local?: boolean; - accept_enterprise?: boolean; -} - -export interface LicenseGetBasicStatus extends Generic { -} - -export interface LicenseGetTrialStatus extends Generic { -} - -export interface LicensePost extends Generic { - acknowledge?: boolean; - body?: T; -} - -export interface LicensePostStartBasic extends Generic { - acknowledge?: boolean; -} - -export interface LicensePostStartTrial extends Generic { - type?: string; - acknowledge?: boolean; -} - -export interface MigrationDeprecations extends Generic { - index?: string; -} - -export interface MlCloseJob extends Generic { - job_id: string; - allow_no_jobs?: boolean; - force?: boolean; - timeout?: string; - body?: T; -} - -export interface MlDeleteCalendar extends Generic { - calendar_id: string; -} - -export interface MlDeleteCalendarEvent extends Generic { - 
calendar_id: string; - event_id: string; -} - -export interface MlDeleteCalendarJob extends Generic { - calendar_id: string; - job_id: string; -} - -export interface MlDeleteDataFrameAnalytics extends Generic { - id: string; - force?: boolean; -} - -export interface MlDeleteDatafeed extends Generic { - datafeed_id: string; - force?: boolean; -} - -export interface MlDeleteExpiredData extends Generic { - body?: T; -} - -export interface MlDeleteFilter extends Generic { - filter_id: string; -} - -export interface MlDeleteForecast extends Generic { - job_id: string; - forecast_id?: string; - allow_no_forecasts?: boolean; - timeout?: string; -} - -export interface MlDeleteJob extends Generic { - job_id: string; - force?: boolean; - wait_for_completion?: boolean; -} - -export interface MlDeleteModelSnapshot extends Generic { - job_id: string; - snapshot_id: string; -} - -export interface MlDeleteTrainedModel extends Generic { - model_id: string; -} - -export interface MlEstimateModelMemory extends Generic { - body: T; -} - -export interface MlEvaluateDataFrame extends Generic { - body: T; -} - -export interface MlExplainDataFrameAnalytics extends Generic { - id?: string; - body?: T; -} - -export interface MlFindFileStructure extends Generic { - lines_to_sample?: number; - line_merge_size_limit?: number; - timeout?: string; - charset?: string; - format?: 'ndjson' | 'xml' | 'delimited' | 'semi_structured_text'; - has_header_row?: boolean; - column_names?: string | string[]; - delimiter?: string; - quote?: string; - should_trim_fields?: boolean; - grok_pattern?: string; - timestamp_field?: string; - timestamp_format?: string; - explain?: boolean; - body: T; -} - -export interface MlFlushJob extends Generic { - job_id: string; - calc_interim?: boolean; - start?: string; - end?: string; - advance_time?: string; - skip_time?: string; - body?: T; -} - -export interface MlForecast extends Generic { - job_id: string; - duration?: string; - expires_in?: string; - max_model_memory?: string; -} - -export interface MlGetBuckets extends Generic { - job_id: string; - timestamp?: string; - expand?: boolean; - exclude_interim?: boolean; - from?: number; - size?: number; - start?: string; - end?: string; - anomaly_score?: number; - sort?: string; - desc?: boolean; - body?: T; -} - -export interface MlGetCalendarEvents extends Generic { - calendar_id: string; - job_id?: string; - start?: string; - end?: string; - from?: number; - size?: number; -} - -export interface MlGetCalendars extends Generic { - calendar_id?: string; - from?: number; - size?: number; - body?: T; -} - -export interface MlGetCategories extends Generic { - job_id: string; - category_id?: number; - from?: number; - size?: number; - body?: T; -} - -export interface MlGetDataFrameAnalytics extends Generic { - id?: string; - allow_no_match?: boolean; - from?: number; - size?: number; -} - -export interface MlGetDataFrameAnalyticsStats extends Generic { - id?: string; - allow_no_match?: boolean; - from?: number; - size?: number; -} - -export interface MlGetDatafeedStats extends Generic { - datafeed_id?: string; - allow_no_datafeeds?: boolean; -} - -export interface MlGetDatafeeds extends Generic { - datafeed_id?: string; - allow_no_datafeeds?: boolean; -} - -export interface MlGetFilters extends Generic { - filter_id?: string; - from?: number; - size?: number; -} - -export interface MlGetInfluencers extends Generic { - job_id: string; - exclude_interim?: boolean; - from?: number; - size?: number; - start?: string; - end?: string; - 
influencer_score?: number; - sort?: string; - desc?: boolean; - body?: T; -} - -export interface MlGetJobStats extends Generic { - job_id?: string; - allow_no_jobs?: boolean; -} - -export interface MlGetJobs extends Generic { - job_id?: string; - allow_no_jobs?: boolean; -} - -export interface MlGetModelSnapshots extends Generic { - job_id: string; - snapshot_id?: string; - from?: number; - size?: number; - start?: string; - end?: string; - sort?: string; - desc?: boolean; - body?: T; -} - -export interface MlGetOverallBuckets extends Generic { - job_id: string; - top_n?: number; - bucket_span?: string; - overall_score?: number; - exclude_interim?: boolean; - start?: string; - end?: string; - allow_no_jobs?: boolean; - body?: T; -} - -export interface MlGetRecords extends Generic { - job_id: string; - exclude_interim?: boolean; - from?: number; - size?: number; - start?: string; - end?: string; - record_score?: number; - sort?: string; - desc?: boolean; - body?: T; -} - -export interface MlGetTrainedModels extends Generic { - model_id?: string; - allow_no_match?: boolean; - include_model_definition?: boolean; - decompress_definition?: boolean; - from?: number; - size?: number; - tags?: string | string[]; - for_export?: boolean; -} - -export interface MlGetTrainedModelsStats extends Generic { - model_id?: string; - allow_no_match?: boolean; - from?: number; - size?: number; -} - -export interface MlInfo extends Generic { -} - -export interface MlOpenJob extends Generic { - job_id: string; -} - -export interface MlPostCalendarEvents extends Generic { - calendar_id: string; - body: T; -} - -export interface MlPostData extends Generic { - job_id: string; - reset_start?: string; - reset_end?: string; - body: T; -} - -export interface MlPreviewDatafeed extends Generic { - datafeed_id: string; -} - -export interface MlPutCalendar extends Generic { - calendar_id: string; - body?: T; -} - -export interface MlPutCalendarJob extends Generic { - calendar_id: string; - job_id: string; -} - -export interface MlPutDataFrameAnalytics extends Generic { - id: string; - body: T; -} - -export interface MlPutDatafeed extends Generic { - datafeed_id: string; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - ignore_throttled?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - body: T; -} - -export interface MlPutFilter extends Generic { - filter_id: string; - body: T; -} - -export interface MlPutJob extends Generic { - job_id: string; - body: T; -} - -export interface MlPutTrainedModel extends Generic { - model_id: string; - body: T; -} - -export interface MlRevertModelSnapshot extends Generic { - job_id: string; - snapshot_id: string; - delete_intervening_results?: boolean; - body?: T; -} - -export interface MlSetUpgradeMode extends Generic { - enabled?: boolean; - timeout?: string; -} - -export interface MlStartDataFrameAnalytics extends Generic { - id: string; - timeout?: string; - body?: T; -} - -export interface MlStartDatafeed extends Generic { - datafeed_id: string; - start?: string; - end?: string; - timeout?: string; - body?: T; -} - -export interface MlStopDataFrameAnalytics extends Generic { - id: string; - allow_no_match?: boolean; - force?: boolean; - timeout?: string; - body?: T; -} - -export interface MlStopDatafeed extends Generic { - datafeed_id: string; - allow_no_datafeeds?: boolean; - force?: boolean; - timeout?: string; -} - -export interface MlUpdateDatafeed extends Generic { - datafeed_id: string; - ignore_unavailable?: boolean; - 
allow_no_indices?: boolean; - ignore_throttled?: boolean; - expand_wildcards?: 'open' | 'closed' | 'hidden' | 'none' | 'all'; - body: T; -} - -export interface MlUpdateFilter extends Generic { - filter_id: string; - body: T; -} - -export interface MlUpdateJob extends Generic { - job_id: string; - body: T; -} - -export interface MlUpdateModelSnapshot extends Generic { - job_id: string; - snapshot_id: string; - body: T; -} - -export interface MlValidate extends Generic { - body: T; -} - -export interface MlValidateDetector extends Generic { - body: T; -} - -export interface MonitoringBulk extends Generic { - type?: string; - system_id?: string; - system_api_version?: string; - interval?: string; - body: T; -} - -export interface RollupDeleteJob extends Generic { - id: string; -} - -export interface RollupGetJobs extends Generic { - id?: string; -} - -export interface RollupGetRollupCaps extends Generic { - id?: string; -} - -export interface RollupGetRollupIndexCaps extends Generic { - index: string; -} - -export interface RollupPutJob extends Generic { - id: string; - body: T; -} - -export interface RollupRollupSearch extends Generic { - index: string | string[]; - type?: string; - typed_keys?: boolean; - rest_total_hits_as_int?: boolean; - body: T; -} - -export interface RollupStartJob extends Generic { - id: string; -} - -export interface RollupStopJob extends Generic { - id: string; - wait_for_completion?: boolean; - timeout?: string; -} - -export interface SearchableSnapshotsClearCache extends Generic { - index?: string | string[]; - ignore_unavailable?: boolean; - allow_no_indices?: boolean; - expand_wildcards?: 'open' | 'closed' | 'none' | 'all'; -} - -export interface SearchableSnapshotsMount extends Generic { - repository: string; - snapshot: string; - master_timeout?: string; - wait_for_completion?: boolean; - body: T; -} - -export interface SearchableSnapshotsRepositoryStats extends Generic { - repository: string; -} - -export interface SearchableSnapshotsStats extends Generic { - index?: string | string[]; -} - -export interface SecurityAuthenticate extends Generic { -} - -export interface SecurityChangePassword extends Generic { - username?: string; - refresh?: 'true' | 'false' | 'wait_for'; - body: T; -} - -export interface SecurityClearCachedRealms extends Generic { - realms: string | string[]; - usernames?: string | string[]; -} - -export interface SecurityClearCachedRoles extends Generic { - name: string | string[]; -} - -export interface SecurityCreateApiKey extends Generic { - refresh?: 'true' | 'false' | 'wait_for'; - body: T; -} - -export interface SecurityDeletePrivileges extends Generic { - application: string; - name: string; - refresh?: 'true' | 'false' | 'wait_for'; -} - -export interface SecurityDeleteRole extends Generic { - name: string; - refresh?: 'true' | 'false' | 'wait_for'; -} - -export interface SecurityDeleteRoleMapping extends Generic { - name: string; - refresh?: 'true' | 'false' | 'wait_for'; -} - -export interface SecurityDeleteUser extends Generic { - username: string; - refresh?: 'true' | 'false' | 'wait_for'; -} - -export interface SecurityDisableUser extends Generic { - username: string; - refresh?: 'true' | 'false' | 'wait_for'; -} - -export interface SecurityEnableUser extends Generic { - username: string; - refresh?: 'true' | 'false' | 'wait_for'; -} - -export interface SecurityGetApiKey extends Generic { - id?: string; - name?: string; - username?: string; - realm_name?: string; - owner?: boolean; -} - -export interface 
SecurityGetBuiltinPrivileges extends Generic { -} - -export interface SecurityGetPrivileges extends Generic { - application?: string; - name?: string; -} - -export interface SecurityGetRole extends Generic { - name?: string | string[]; -} - -export interface SecurityGetRoleMapping extends Generic { - name?: string | string[]; -} - -export interface SecurityGetToken extends Generic { - body: T; -} - -export interface SecurityGetUser extends Generic { - username?: string | string[]; -} - -export interface SecurityGetUserPrivileges extends Generic { -} - -export interface SecurityHasPrivileges extends Generic { - user?: string; - body: T; -} - -export interface SecurityInvalidateApiKey extends Generic { - body: T; -} - -export interface SecurityInvalidateToken extends Generic { - body: T; -} - -export interface SecurityPutPrivileges extends Generic { - refresh?: 'true' | 'false' | 'wait_for'; - body: T; -} - -export interface SecurityPutRole extends Generic { - name: string; - refresh?: 'true' | 'false' | 'wait_for'; - body: T; -} - -export interface SecurityPutRoleMapping extends Generic { - name: string; - refresh?: 'true' | 'false' | 'wait_for'; - body: T; -} - -export interface SecurityPutUser extends Generic { - username: string; - refresh?: 'true' | 'false' | 'wait_for'; - body: T; -} - -export interface SlmDeleteLifecycle extends Generic { - policy_id: string; -} - -export interface SlmExecuteLifecycle extends Generic { - policy_id: string; -} - -export interface SlmExecuteRetention extends Generic { -} - -export interface SlmGetLifecycle extends Generic { - policy_id?: string | string[]; -} - -export interface SlmGetStats extends Generic { -} - -export interface SlmGetStatus extends Generic { -} - -export interface SlmPutLifecycle extends Generic { - policy_id: string; - body?: T; -} - -export interface SlmStart extends Generic { -} - -export interface SlmStop extends Generic { -} - -export interface SqlClearCursor extends Generic { - body: T; -} - -export interface SqlQuery extends Generic { - format?: string; - body: T; -} - -export interface SqlTranslate extends Generic { - body: T; -} - -export interface SslCertificates extends Generic { -} - -export interface TransformDeleteTransform extends Generic { - transform_id: string; - force?: boolean; -} - -export interface TransformGetTransform extends Generic { - transform_id?: string; - from?: number; - size?: number; - allow_no_match?: boolean; -} - -export interface TransformGetTransformStats extends Generic { - transform_id: string; - from?: number; - size?: number; - allow_no_match?: boolean; -} - -export interface TransformPreviewTransform extends Generic { - body: T; -} - -export interface TransformPutTransform extends Generic { - transform_id: string; - defer_validation?: boolean; - body: T; -} - -export interface TransformStartTransform extends Generic { - transform_id: string; - timeout?: string; -} - -export interface TransformStopTransform extends Generic { - transform_id: string; - force?: boolean; - wait_for_completion?: boolean; - timeout?: string; - allow_no_match?: boolean; - wait_for_checkpoint?: boolean; -} - -export interface TransformUpdateTransform extends Generic { - transform_id: string; - defer_validation?: boolean; - body: T; -} - -export interface WatcherAckWatch extends Generic { - watch_id: string; - action_id?: string | string[]; -} - -export interface WatcherActivateWatch extends Generic { - watch_id: string; -} - -export interface WatcherDeactivateWatch extends Generic { - watch_id: string; -} - -export 
interface WatcherDeleteWatch extends Generic { - id: string; -} - -export interface WatcherExecuteWatch extends Generic { - id?: string; - debug?: boolean; - body?: T; -} - -export interface WatcherGetWatch extends Generic { - id: string; -} - -export interface WatcherPutWatch extends Generic { - id: string; - active?: boolean; - version?: number; - if_seq_no?: number; - if_primary_term?: number; - body?: T; -} - -export interface WatcherStart extends Generic { -} - -export interface WatcherStats extends Generic { - metric?: string | string[]; - emit_stacktraces?: boolean; -} - -export interface WatcherStop extends Generic { -} - -export interface XpackInfo extends Generic { - categories?: string | string[]; -} - -export interface XpackUsage extends Generic { - master_timeout?: string; -} diff --git a/catalog-info.yaml b/catalog-info.yaml new file mode 100644 index 000000000..c0ccdc314 --- /dev/null +++ b/catalog-info.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/catalog-info.json +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: elasticsearch-js +spec: + type: library + owner: group:devtools-team + lifecycle: production + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: elasticsearch-js-integration-tests + description: elasticsearch-js - integration tests +spec: + type: buildkite-pipeline + owner: group:devtools-team + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: elasticsearch-js - integration tests + spec: + repository: elastic/elasticsearch-js + pipeline_file: .buildkite/pipeline.yml + env: + ELASTIC_SLACK_NOTIFICATIONS_ENABLED: "true" + SLACK_NOTIFICATIONS_CHANNEL: "#devtools-notify-javascript" + teams: + devtools-team: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY + provider_settings: + build_pull_requests: true + build_branches: false + separate_pull_request_statuses: true + cancel_intermediate_builds: true diff --git a/docs/authentication.asciidoc b/docs/authentication.asciidoc deleted file mode 100644 index c27553913..000000000 --- a/docs/authentication.asciidoc +++ /dev/null @@ -1,132 +0,0 @@ -[[auth-reference]] -== Authentication - -This document contains code snippets to show you how to connect to various {es} -providers. - - -=== Elastic Cloud - -If you are using https://www.elastic.co/cloud[Elastic Cloud], the client offers -an easy way to connect to it via the `cloud` option. You must pass the Cloud ID -that you can find in the cloud console, then your username and password inside -the `auth` option. - -NOTE: When connecting to Elastic Cloud, the client will automatically enable -both request and response compression by default, since it yields significant -throughput improvements. Moreover, the client will also set the ssl option -`secureProtocol` to `TLSv1_2_method` unless specified otherwise. You can still -override this option in your own configuration. - -IMPORTANT: Do not enable sniffing when using Elastic Cloud: since the nodes are -behind a load balancer, Elastic Cloud will take care of everything for you.
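To override the `secureProtocol` ssl default mentioned in the note above, a minimal sketch; the Cloud ID and credentials are placeholders, and `TLS_method` is just one possible value:

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({
  cloud: {
    id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA=='
  },
  auth: {
    username: 'elastic',
    password: 'changeme'
  },
  ssl: {
    // replaces the TLSv1_2_method default described in the note above
    secureProtocol: 'TLS_method'
  }
})
----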
- -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - cloud: { - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==', - }, - auth: { - username: 'elastic', - password: 'changeme' - } -}) ---- - - -=== Basic authentication - -You can provide your credentials by passing the `username` and `password` -parameters via the `auth` option. - -NOTE: If you provide both basic authentication credentials and the Api Key configuration, the Api Key will take precedence. - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://localhost:9200/', - auth: { - username: 'elastic', - password: 'changeme' - } -}) ---- - - -Otherwise, you can provide your credentials in the node(s) URL. - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://username:password@localhost:9200/' -}) ---- - - -=== ApiKey authentication - -You can use the -https://www.elastic.co/guide/en/elasticsearch/reference/7.x/security-api-create-api-key.html[ApiKey] -authentication by passing the `apiKey` parameter via the `auth` option. The -`apiKey` parameter can be either a base64 encoded string or an object with the -values that you can obtain from the -https://www.elastic.co/guide/en/elasticsearch/reference/7.x/security-api-create-api-key.html[create api key endpoint]. - -NOTE: If you provide both basic authentication credentials and the Api Key configuration, the Api Key will take precedence. - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://localhost:9200/', - auth: { - apiKey: 'base64EncodedKey' - } -}) ---- - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'https://localhost:9200/', - auth: { - apiKey: { - id: 'foo', - api_key: 'bar' - } - } -}) ---- - - -=== SSL configuration - -Without any additional configuration you can specify `https://` node urls, but -the certificates used to sign these requests will not be verified -(`rejectUnauthorized: false`). To turn on certificate verification, you must -specify an `ssl` object either in the top level config or in each host config -object and set `rejectUnauthorized: true`. The ssl config object can contain -many of the same configuration options that -https://nodejs.org/api/tls.html#tls_tls_connect_options_callback[tls.connect()] -accepts. - -[source,js] ---- -const fs = require('fs') -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'http://localhost:9200/', - auth: { - username: 'elastic', - password: 'changeme' - }, - ssl: { - ca: fs.readFileSync('./cacert.pem'), - rejectUnauthorized: true - } -}) ---- \ No newline at end of file diff --git a/docs/breaking-changes.asciidoc b/docs/breaking-changes.asciidoc deleted file mode 100644 index fb8dc3b76..000000000 --- a/docs/breaking-changes.asciidoc +++ /dev/null @@ -1,321 +0,0 @@ -[[breaking-changes]] -== Breaking changes coming from the old client - -If you were already using the previous version of this client – the one you used -to install with `npm install elasticsearch` – you will encounter some breaking -changes. - - -=== Don’t panic! - -Every breaking change was carefully weighed, and each is justified. Furthermore, -the new codebase has been rewritten with modern JavaScript and has been -carefully designed to be easy to maintain.
- - -=== Breaking changes - -* Minimum supported version of Node.js is `v8`. - -* Everything has been rewritten using ES6 classes to help users extend the -defaults more easily. - -* There is no longer an integrated logger. The client is now an event emitter -that emits the following events: `request`, `response`, and `error` (see the short logging sketch below). - -* The code is no longer shipped with all the versions of the API, but only that -of the package’s major version. This means that if you are using {es} `v6`, you -are required to install `@elastic/elasticsearch@6`, and so on. - -* The internals are completely different, so if you used to tweak them a lot, -you will need to refactor your code. The public API should be almost the same. - -* There is no longer browser support; that will be distributed via another -module: `@elastic/elasticsearch-browser`. This module is intended for Node.js -only. - -* The returned value of an API call will no longer be the `body`, `statusCode`, -and `headers` for callbacks, nor only the `body` for promises. The new returned -value will be a unique object containing the `body`, `statusCode`, `headers`, -`warnings`, and `meta`, for both callbacks and promises. - - -[source,js] ---- -// before -const body = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, body, statusCode, headers) => { - if (err) console.log(err) -}) - -// after -const { body, statusCode, headers, warnings } = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, { body, statusCode, headers, warnings }) => { - if (err) console.log(err) -}) ---- - - -* Errors: there is no longer a custom error class for every HTTP status code -(such as `BadRequest` or `NotFound`). There is instead a single `ResponseError`. -Every error class has been renamed, and now each is suffixed with `Error` at the -end. - -* Removed errors: `RequestTypeError`, `Generic`, and all the status code -specific errors (such as `BadRequest` or `NotFound`). - -* Added errors: `ConfigurationError` (in case of bad configurations) and -`ResponseError` that contains all the data you may need to handle the specific -error, such as `statusCode`, `headers`, `body`, and `message`. - - -* Renamed errors: - -** `RequestTimeout` (408 statusCode) => `TimeoutError` -** `ConnectionFault` => `ConnectionError` -** `NoConnections` => `NoLivingConnectionsError` -** `Serialization` => `SerializationError` -** `Serialization` => `DeserializationError` - -* You must specify the port number in the configuration. In the previous -version, you could specify the host and port in a variety of ways. With the new -client, there is only one way to do it, via the `node` parameter. - -* The `plugins` option has been removed. If you want to extend the client now, -you should use the `client.extend` API. - -[source,js] ---- -// before -const { Client } = require('elasticsearch') -const client = new Client({ plugins: [...] }) - -// after -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ ... }) -client.extend(...) ---- - -* There is a clear distinction between the API related parameters and the client -related configurations. The parameters `ignore`, `headers`, `requestTimeout` and -`maxRetries` are no longer part of the API object and you need to specify them -in a second option object.
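As mentioned in the logger item above, the client is now an event emitter. A minimal sketch of what replacing the old integrated logger could look like, assuming only the event names listed in that item and plain `console` output:

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

// each listener receives an error (or null) plus the event metadata
client.on('request', (err, meta) => {
  if (err) console.error('request failed', err)
  else console.log('request started', meta)
})

client.on('response', (err, meta) => {
  if (err) console.error('response errored', err)
  else console.log('response received', meta)
})
----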
- -[source,js] ---- -// before -const body = await client.search({ - index: 'my-index', - body: { foo: 'bar' }, - ignore: [404] -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' }, - ignore: [404] -}, (err, body, statusCode, headers) => { - if (err) console.log(err) -}) - -// after -const { body, statusCode, headers, warnings } = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - ignore: [404] -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - ignore: [404] -}, (err, { body, statusCode, headers, warnings }) => { - if (err) console.log(err) -}) ---- - -* The `transport.request` method no longer accepts the `query` key. Use the -`querystring` key instead (which can be a string or an object). If you also -need to send a bulk-like request, instead of the `body` key use the `bulkBody` -key. In this method, the client-specific parameters should be passed as a second -object. - -[source,js] ---- -// before -const body = await client.transport.request({ - method: 'GET', - path: '/my-index/_search', - body: { foo: 'bar' }, - query: { bar: 'baz' }, - ignore: [404] -}) - -client.transport.request({ - method: 'GET', - path: '/my-index/_search', - body: { foo: 'bar' }, - query: { bar: 'baz' }, - ignore: [404] -}, (err, body, statusCode, headers) => { - if (err) console.log(err) -}) - -// after -const { body, statusCode, headers, warnings } = await client.transport.request({ - method: 'GET', - path: '/my-index/_search', - body: { foo: 'bar' }, - querystring: { bar: 'baz' } -}, { - ignore: [404] -}) - -client.transport.request({ - method: 'GET', - path: '/my-index/_search', - body: { foo: 'bar' }, - querystring: { bar: 'baz' } -}, { - ignore: [404] -}, (err, { body, statusCode, headers, warnings }) => { - if (err) console.log(err) -}) ---- - -=== Talk is cheap. Show me the code. - -You can find a code snippet with the old client below followed by the same code -logic but with the new client. - -[source,js] ---- -const { Client, errors } = require('elasticsearch') -const client = new Client({ - host: 'http://localhost:9200/', - plugins: [utility] -}) - -async function run () { - try { - const body = await client.search({ - index: 'game-of-thrones', - body: { - query: { - match: { quote: 'winter' } - } - }, - ignore: [404] - }) - console.log(body) - } catch (err) { - if (err instanceof errors.BadRequest) { - console.log('Bad request') - } else { - console.log(err) - } - } -} - -function utility (Client, config, components) { - const ca = components.clientAction.factory - Client.prototype.utility = components.clientAction.namespaceFactory() - const utility = Client.prototype.utility.prototype - - utility.index = ca({ - params: { - refresh: { - type: 'enum', - options: [ - 'true', - 'false', - 'wait_for', - '' - ] - }, - }, - urls: [ - { - fmt: '/<%=index%>/_doc', - req: { - index: { - type: 'string', - required: true - } - } - } - ], - needBody: true, - method: 'POST' - }) -} ---- - -And now with the new client.
- -[source,js] ---- -const { Client, errors } = require('@elastic/elasticsearch') -// NOTE: `host` has been renamed to `node`, -// and `plugins` is no longer supported -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - try { - // NOTE: we are using the destructuring assignment - const { body } = await client.search({ - index: 'game-of-thrones', - body: { - query: { - match: { quote: 'winter' } - } - } - // NOTE: `ignore` is now in a separate object - }, { - ignore: [404] - }) - console.log(body) - } catch (err) { - // NOTE: we are checking the `statusCode` property - if (err.statusCode === 400) { - console.log('Bad request') - } else { - console.log(err) - } - } -} - -// NOTE: we can still extend the client, but with a different API. -// This new API is a little bit more verbose, since you must write -// your own validations, but it's way more flexible. -client.extend('utility.index', ({ makeRequest, ConfigurationError }) => { - return function utilityIndex (params, options) { - const { body, index, ...querystring } = params - if (body == null) throw new ConfigurationError('Missing body') - if (index == null) throw new ConfigurationError('Missing index') - const requestParams = { - method: 'POST', - path: `/${index}/_doc`, - body: body, - querystring - } - return makeRequest(requestParams, options) - } -}) ---- diff --git a/docs/child.asciidoc b/docs/child.asciidoc deleted file mode 100644 index cba6b4037..000000000 --- a/docs/child.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -[[child-client]] -== Creating a child client - -There are some use cases where you may need multiple instances of the client. -You can easily do that by calling `new Client()` as many times as you need, but -you will lose all the benefits of using one single client, such as the long -living connections and the connection pool handling. To avoid this problem, the -client offers a `child` API, which returns a new client instance that shares the -connection pool with the parent client. - -NOTE: The event emitter is shared between the parent and the child(ren). If you -extend the parent client, the child client will have the same extensions, while -if the child client adds an extension, the parent client will not be extended. - -You can pass to the `child` every client option you would pass to a normal -client, except the connection pool specific options (`ssl`, `agent`, `pingTimeout`, -`Connection`, and `resurrectStrategy`). - -CAUTION: If you call `close` in any of the parent/child clients, every client -will be closed. - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) -const child = client.child({ - headers: { 'x-foo': 'bar' }, - requestTimeout: 1000 -}) - -client.info(console.log) -child.info(console.log) ---- \ No newline at end of file diff --git a/docs/configuration.asciidoc b/docs/configuration.asciidoc deleted file mode 100644 index 669126a17..000000000 --- a/docs/configuration.asciidoc +++ /dev/null @@ -1,329 +0,0 @@ -[[client-configuration]] -== Client configuration - -The client is designed to be easily configured for your needs. In the following -section, you can see the possible basic options that you can use to configure -it.
- -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') - -const client = new Client({ - node: 'http://localhost:9200/', - maxRetries: 5, - requestTimeout: 60000, - sniffOnStart: true -}) ---- - - -=== Basic options - -[cols=2*] -|=== -|`node` or `nodes` -a|The Elasticsearch endpoint to use. + -It can be a single string or an array of strings: -[source,js] ---- -node: 'http://localhost:9200/' ---- -Or it can be an object (or an array of objects) that represents the node: -[source,js] ---- -node: { - url: new URL('http://localhost:9200/'), - ssl: 'ssl options', - agent: 'http agent options', - id: 'custom node id', - headers: { 'custom': 'headers' }, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } -} ---- - -|`auth` -a|Your authentication data. You can use both basic authentication and -{ref}/security-api-create-api-key.html[ApiKey]. + -See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] -for more details. + -_Default:_ `null` - -Basic authentication: -[source,js] ---- -auth: { - username: 'elastic', - password: 'changeme' -} ---- -{ref}/security-api-create-api-key.html[ApiKey] authentication: -[source,js] ---- -auth: { - apiKey: 'base64EncodedKey' -} ---- - - -|`maxRetries` -|`number` - Max number of retries for each request. + -_Default:_ `3` - -|`requestTimeout` -|`number` - Max request timeout for each request. + -_Default:_ `30000` - -|`pingTimeout` -|`number` - Max ping request timeout for each request. + -_Default:_ `3000` - -|`sniffInterval` -|`number, boolean` - Perform a sniff operation every `n` milliseconds. + -_Default:_ `false` - -|`sniffOnStart` -|`boolean` - Perform a sniff once the client is started. + -_Default:_ `false` - -|`sniffEndpoint` -|`string` - Endpoint to ping during a sniff. + -_Default:_ `'_nodes/_all/http'` - -|`sniffOnConnectionFault` -|`boolean` - Perform a sniff on connection fault. + -_Default:_ `false` - -|`resurrectStrategy` -|`string` - Configure the node resurrection strategy. + -_Options:_ `'ping'`, `'optimistic'`, `'none'` + -_Default:_ `'ping'` - -|`suggestCompression` -|`boolean` - Adds `accept-encoding` header to every request. + -_Default:_ `false` - -|`compression` -|`string, boolean` - Enables gzip request body compression. + -_Options:_ `'gzip'`, `false` + -_Default:_ `false` - -|`ssl` -|`http.SecureContextOptions` - ssl https://nodejs.org/api/tls.html[configuration]. + -_Default:_ `null` - -|`agent` -a|`http.AgentOptions, function` - http agent https://nodejs.org/api/http.html#http_new_agent_options[options], -or a function that returns an actual http agent instance. + -_Default:_ `null` -[source,js] ---- -const client = new Client({ - node: 'http://localhost:9200/', - agent: { agent: 'options' } -}) - -const client = new Client({ - node: 'http://localhost:9200/', - agent: () => new CustomAgent() -}) ---- - -|`nodeFilter` -a|`function` - Filters which node not to use for a request. + -_Default:_ -[source,js] ---- -function defaultNodeFilter (node) { - // avoid master only nodes - if (node.roles.master === true && - node.roles.data === false && - node.roles.ingest === false) { - return false - } - return true -} ---- - -|`nodeSelector` -a|`function` - Custom selection strategy.
+ -_Options:_ `'round-robin'`, `'random'`, custom function + -_Default:_ `'round-robin'` + -_Custom function example:_ -[source,js] ---- -function nodeSelector (connections) { - const index = calculateIndex() - return connections[index] -} ---- - -|`generateRequestId` -a|`function` - Function to generate the request id for every request; it takes -two parameters, the request parameters and options. + -By default it generates an incremental integer for every request. + -_Custom function example:_ -[source,js] ---- -function generateRequestId (params, options) { - // your id generation logic - // must be synchronous - return 'id' -} ---- - -|`name` -|`string` - The name to identify the client instance in the events. + -_Default:_ `elasticsearch-js` - -|`opaqueIdPrefix` -|`string` - A string that will be used to prefix any `X-Opaque-Id` header. + -See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html#_x-opaque-id_support[`X-Opaque-Id` support] for more details. + -_Default:_ `null` - -|`headers` -|`object` - A set of custom headers to send in every request. + -_Default:_ `{}` - -|`cloud` -a|`object` - Custom configuration for connecting to -https://cloud.elastic.co[Elastic Cloud]. See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] -for more details. + -_Default:_ `null` + -_Cloud configuration example:_ -[source,js] ---- -const client = new Client({ - cloud: { - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==' - }, - auth: { - username: 'elastic', - password: 'changeme' - } -}) ---- - -|=== - - -=== Advanced configuration - -If you need to customize the client behavior heavily, you are in the right -place! The client allows you to customize the following internals: - -* `Transport` class -* `ConnectionPool` class -* `Connection` class -* `Serializer` class - - -=== `Transport` - -This class is responsible for performing the request to {es} and handling -errors; it also handles the sniffing. - -[source,js] ---- -const { Client, Transport } = require('@elastic/elasticsearch') - -class MyTransport extends Transport { - request (params, options, callback) { - // your code - } -} - -const client = new Client({ - Transport: MyTransport -}) ---- - -Sometimes you need to inject a small snippet of your code and then continue to -use the usual client code. In such cases, call `super.method`: - -[source,js] ---- -class MyTransport extends Transport { - request (params, options, callback) { - // your code - return super.request(params, options, callback) - } -} ---- - - -=== `ConnectionPool` - -This class is responsible for keeping in memory all the {es} Connections that we -are using. There is a single Connection for every node. The connection pool -handles the resurrection strategies and the updates of the pool. - -[source,js] ---- -const { Client, ConnectionPool } = require('@elastic/elasticsearch') - -class MyConnectionPool extends ConnectionPool { - markAlive (connection) { - // your code - super.markAlive(connection) - } -} - -const client = new Client({ - ConnectionPool: MyConnectionPool -}) ---- - - -=== `Connection` - -This class represents a single node; it holds all the information we have on the -node, such as roles, id, URL, custom headers and so on. The actual HTTP request -is performed here, which means that if you want to swap the default HTTP client -(Node.js core), you should override the `request` method of this class.
- -[source,js] ---- -const { Client, Connection } = require('@elastic/elasticsearch') - -class MyConnection extends Connection { - request (params, callback) { - // your code - } -} - -const client = new Client({ - Connection: MyConnection -}) ---- - - -=== `Serializer` - -This class is responsible for the serialization of every request; it offers the -following methods: - -* `serialize(object: any): string;` serializes request objects. -* `deserialize(json: string): any;` deserializes response strings. -* `ndserialize(array: any[]): string;` serializes bulk request objects. -* `qserialize(object: any): string;` serializes request query parameters. - -[source,js] ---- -const { Client, Serializer } = require('@elastic/elasticsearch') - -class MySerializer extends Serializer { - serialize (object) { - // your code - } -} - -const client = new Client({ - Serializer: MySerializer -}) ---- diff --git a/docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc b/docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc new file mode 100644 index 000000000..2bd8aa7df --- /dev/null +++ b/docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRealms({ + realms: "default_file,ldap1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc b/docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc new file mode 100644 index 000000000..146e97702 --- /dev/null +++ b/docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: ".ds-my-data-stream-2099.03.07-000001", + max_num_segments: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/004a17b42ab5155bb61da797a006fa9f.asciidoc b/docs/doc_examples/004a17b42ab5155bb61da797a006fa9f.asciidoc new file mode 100644 index 000000000..4c8ea77d9 --- /dev/null +++ b/docs/doc_examples/004a17b42ab5155bb61da797a006fa9f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + pinned: { + ids: ["1", "4", "100"], + organic: { + match: { + description: "iphone", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc b/docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc new file mode 100644 index 000000000..f1d0b2224 --- /dev/null +++ b/docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "whitespace", + filter: ["stemmer"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/007179b5e241da650562a5f0a5007823.asciidoc b/docs/doc_examples/007179b5e241da650562a5f0a5007823.asciidoc new file mode 100644 index 000000000..7ef9728b5 --- /dev/null +++ 
diff --git a/docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc b/docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc new file mode 100644 index 000000000..2bd8aa7df --- /dev/null +++ b/docs/doc_examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRealms({ + realms: "default_file,ldap1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc b/docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc new file mode 100644 index 000000000..146e97702 --- /dev/null +++ b/docs/doc_examples/004743b9c9f61588926ccf734696b713.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: ".ds-my-data-stream-2099.03.07-000001", + max_num_segments: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/004a17b42ab5155bb61da797a006fa9f.asciidoc b/docs/doc_examples/004a17b42ab5155bb61da797a006fa9f.asciidoc new file mode 100644 index 000000000..4c8ea77d9 --- /dev/null +++ b/docs/doc_examples/004a17b42ab5155bb61da797a006fa9f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + pinned: { + ids: ["1", "4", "100"], + organic: { + match: { + description: "iphone", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc b/docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc new file mode 100644 index 000000000..f1d0b2224 --- /dev/null +++ b/docs/doc_examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "whitespace", + filter: ["stemmer"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/007179b5e241da650562a5f0a5007823.asciidoc b/docs/doc_examples/007179b5e241da650562a5f0a5007823.asciidoc new file mode 100644 index 000000000..7ef9728b5 --- /dev/null +++ b/docs/doc_examples/007179b5e241da650562a5f0a5007823.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "cluster_health_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + http: { + request: { + host: "localhost", + port: 9200, + path: "/_cluster/health", + }, + }, + }, + condition: { + compare: { + "ctx.payload.status": { + eq: "red", + }, + }, + }, + actions: { + send_email: { + email: { + to: "username@example.org", + subject: "Cluster Status Warning", + body: "Cluster status is RED", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/008ed823c89e703c447ac89c6b689833.asciidoc b/docs/doc_examples/008ed823c89e703c447ac89c6b689833.asciidoc new file mode 100644 index 000000000..9498fb4aa --- /dev/null +++ b/docs/doc_examples/008ed823c89e703c447ac89c6b689833.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.migration.postFeatureUpgrade(); +console.log(response); +---- diff --git a/docs/doc_examples/0091fc75271b1fbbd4269622a4881e8b.asciidoc b/docs/doc_examples/0091fc75271b1fbbd4269622a4881e8b.asciidoc new file mode 100644 index 000000000..af23c1cc6 --- /dev/null +++ b/docs/doc_examples/0091fc75271b1fbbd4269622a4881e8b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + match: { + "http.clientip": "40.135.0.0", + }, + }, + fields: ["http.clientip"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc b/docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc new file mode 100644 index 000000000..aad48ff29 --- /dev/null +++ b/docs/doc_examples/00ad41bde67beac991534ae0e04b1296.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "my-data-stream", + filter_path: "data_streams.indices.index_name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/00b3b6d76a368ae71277ea24af318693.asciidoc b/docs/doc_examples/00b3b6d76a368ae71277ea24af318693.asciidoc new file mode 100644 index 000000000..7f1e2c035 --- /dev/null +++ b/docs/doc_examples/00b3b6d76a368ae71277ea24af318693.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores(); +console.log(response); +---- diff --git a/docs/doc_examples/00c05aa931fc985985e3e21c93cf43ff.asciidoc b/docs/doc_examples/00c05aa931fc985985e3e21c93cf43ff.asciidoc new file mode 100644 index 000000000..63bfc32fe --- /dev/null +++ b/docs/doc_examples/00c05aa931fc985985e3e21c93cf43ff.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: '{ "query": {{#toJson}}my_query{{/toJson}} }', + params: { + my_query: { + match_all: {},
+ }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/00d65f7b9daa1c6b18eedd8ace206bae.asciidoc b/docs/doc_examples/00d65f7b9daa1c6b18eedd8ace206bae.asciidoc new file mode 100644 index 000000000..36a79d081 --- /dev/null +++ b/docs/doc_examples/00d65f7b9daa1c6b18eedd8ace206bae.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["asciifolding"], + text: "açaí à la carte", +}); +console.log(response); +---- diff --git a/docs/doc_examples/00e0c964c79fcc1876ab957da2ffce82.asciidoc b/docs/doc_examples/00e0c964c79fcc1876ab957da2ffce82.asciidoc new file mode 100644 index 000000000..92160eef7 --- /dev/null +++ b/docs/doc_examples/00e0c964c79fcc1876ab957da2ffce82.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "italian_example", + settings: { + analysis: { + filter: { + italian_elision: { + type: "elision", + articles: [ + "c", + "l", + "all", + "dall", + "dell", + "nell", + "sull", + "coll", + "pell", + "gl", + "agl", + "dagl", + "degl", + "negl", + "sugl", + "un", + "m", + "t", + "s", + "v", + "d", + ], + articles_case: true, + }, + italian_stop: { + type: "stop", + stopwords: "_italian_", + }, + italian_keywords: { + type: "keyword_marker", + keywords: ["esempio"], + }, + italian_stemmer: { + type: "stemmer", + language: "light_italian", + }, + }, + analyzer: { + rebuilt_italian: { + tokenizer: "standard", + filter: [ + "italian_elision", + "lowercase", + "italian_stop", + "italian_keywords", + "italian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc b/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc new file mode 100644 index 000000000..09675e02d --- /dev/null +++ b/docs/doc_examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "my-e5-model", + inference_config: { + service: "elasticsearch", + service_settings: { + num_allocations: 1, + num_threads: 1, + model_id: ".multilingual-e5-small", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc b/docs/doc_examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc new file mode 100644 index 000000000..de83fe2e1 --- /dev/null +++ b/docs/doc_examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, +}); +console.log(response); +---- diff --git a/docs/doc_examples/015294a400986295039e52ebc62033be.asciidoc b/docs/doc_examples/015294a400986295039e52ebc62033be.asciidoc deleted file mode 100644 index 324425149..000000000 --- a/docs/doc_examples/015294a400986295039e52ebc62033be.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is 
autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - doc: { - name: 'new_name' - }, - detect_noop: false - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc b/docs/doc_examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc new file mode 100644 index 000000000..dc90ae673 --- /dev/null +++ b/docs/doc_examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + range: { + year: { + gt: 2023, + }, + }, + }, + }, + }, + { + standard: { + query: { + term: { + topic: "elastic", + }, + }, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: false, + aggs: { + topics: { + terms: { + field: "topic", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0163af36c8472ac0c5160c8b716f5b26.asciidoc b/docs/doc_examples/0163af36c8472ac0c5160c8b716f5b26.asciidoc new file mode 100644 index 000000000..184d88118 --- /dev/null +++ b/docs/doc_examples/0163af36c8472ac0c5160c8b716f5b26.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + query: { + term: { + type: "t-shirt", + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc b/docs/doc_examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc new file mode 100644 index 000000000..279e91656 --- /dev/null +++ b/docs/doc_examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "my-rerank-model", + inference_config: { + service: "cohere", + service_settings: { + model_id: "rerank-english-v3.0", + api_key: "{{COHERE_API_KEY}}", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/016f3147dae9ff2c3e831257ae470361.asciidoc b/docs/doc_examples/016f3147dae9ff2c3e831257ae470361.asciidoc new file mode 100644 index 000000000..0985ba535 --- /dev/null +++ b/docs/doc_examples/016f3147dae9ff2c3e831257ae470361.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "logs-*", + alias: "logs", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/019e329ed5a930aef825266822e7377a.asciidoc b/docs/doc_examples/019e329ed5a930aef825266822e7377a.asciidoc new file mode 100644 index 000000000..1e0efb671 --- /dev/null +++ b/docs/doc_examples/019e329ed5a930aef825266822e7377a.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.indices.create({ + index: "asciifold_example", + settings: { + analysis: { + analyzer: { + standard_asciifolding: { + tokenizer: "standard", + filter: ["my_ascii_folding"], + }, + }, + filter: { + my_ascii_folding: { + type: "asciifolding", + preserve_original: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/01ae196538fac197eedbbf458a4ef31b.asciidoc b/docs/doc_examples/01ae196538fac197eedbbf458a4ef31b.asciidoc new file mode 100644 index 000000000..487139330 --- /dev/null +++ b/docs/doc_examples/01ae196538fac197eedbbf458a4ef31b.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + kwd: { + type: "keyword", + ignore_above: 3, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + kwd: ["foo", "foo", "bang", "bar", "baz"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc b/docs/doc_examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc new file mode 100644 index 000000000..2752b8336 --- /dev/null +++ b/docs/doc_examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "component_template1", + template: { + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "runtime_component_template", + template: { + mappings: { + runtime: { + day_of_week: { + type: "keyword", + script: { + source: + "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))", + }, + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc b/docs/doc_examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc new file mode 100644 index 000000000..0a9693fe3 --- /dev/null +++ b/docs/doc_examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.startTransform({ + transform_id: "ecommerce_transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/01cd0ea360282a2c591a366679d7187d.asciidoc b/docs/doc_examples/01cd0ea360282a2c591a366679d7187d.asciidoc new file mode 100644 index 000000000..71f3092da --- /dev/null +++ b/docs/doc_examples/01cd0ea360282a2c591a366679d7187d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + human: "true", + detailed: "true", + actions: "indices:data/write/bulk", +}); +console.log(response); +---- diff --git a/docs/doc_examples/01da9e0620e48270617fc248e6415cac.asciidoc b/docs/doc_examples/01da9e0620e48270617fc248e6415cac.asciidoc new file mode 100644 index 
000000000..e243fb4f9 --- /dev/null +++ b/docs/doc_examples/01da9e0620e48270617fc248e6415cac.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + aggs: { + "my-agg-name": { + terms: { + field: "my-field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc b/docs/doc_examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc new file mode 100644 index 000000000..89f99c966 --- /dev/null +++ b/docs/doc_examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/01f50acf7998b24969f451e922d145eb.asciidoc b/docs/doc_examples/01f50acf7998b24969f451e922d145eb.asciidoc new file mode 100644 index 000000000..d92c8bdfe --- /dev/null +++ b/docs/doc_examples/01f50acf7998b24969f451e922d145eb.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "basque_example", + settings: { + analysis: { + filter: { + basque_stop: { + type: "stop", + stopwords: "_basque_", + }, + basque_keywords: { + type: "keyword_marker", + keywords: ["Adibidez"], + }, + basque_stemmer: { + type: "stemmer", + language: "basque", + }, + }, + analyzer: { + rebuilt_basque: { + tokenizer: "standard", + filter: [ + "lowercase", + "basque_stop", + "basque_keywords", + "basque_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/020c95db88ef356093f03be84893ddf9.asciidoc b/docs/doc_examples/020c95db88ef356093f03be84893ddf9.asciidoc new file mode 100644 index 000000000..0dc8775df --- /dev/null +++ b/docs/doc_examples/020c95db88ef356093f03be84893ddf9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.followStats({ + index: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/020de6b6cb960a76297452725a38889f.asciidoc b/docs/doc_examples/020de6b6cb960a76297452725a38889f.asciidoc new file mode 100644 index 000000000..7fb6cecdf --- /dev/null +++ b/docs/doc_examples/020de6b6cb960a76297452725a38889f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + has_child: { + type: "child", + query: { + match_all: {}, + }, + max_children: 10, + min_children: 2, + score_mode: "min", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc b/docs/doc_examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc new file mode 100644 index 000000000..78f7c8a6c --- /dev/null +++ b/docs/doc_examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.putMapping({ + index: "my-index-000001", + properties: { + name: { + properties: { + last: { + type: "text", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/025155da86802ebf4c3aeee5aab692f9.asciidoc b/docs/doc_examples/025155da86802ebf4c3aeee5aab692f9.asciidoc new file mode 100644 index 000000000..f41e9eb7a --- /dev/null +++ b/docs/doc_examples/025155da86802ebf4c3aeee5aab692f9.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "sales", + mappings: { + properties: { + tags: { + type: "keyword", + }, + comments: { + type: "nested", + properties: { + username: { + type: "keyword", + }, + comment: { + type: "text", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc b/docs/doc_examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc new file mode 100644 index 000000000..6349c1f6e --- /dev/null +++ b/docs/doc_examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.flushJob({ + job_id: "low_request_rate", + calc_interim: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0264e994a7e68561e2ca6be0f0d90ee9.asciidoc b/docs/doc_examples/0264e994a7e68561e2ca6be0f0d90ee9.asciidoc new file mode 100644 index 000000000..6be1d4da8 --- /dev/null +++ b/docs/doc_examples/0264e994a7e68561e2ca6be0f0d90ee9.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + JapaneseCars: { + terms: { + field: "make", + include: ["mazda", "honda"], + }, + }, + ActiveCarManufacturers: { + terms: { + field: "make", + exclude: ["rover", "jensen"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0280247e0cf2e561c548f22c9fb31163.asciidoc b/docs/doc_examples/0280247e0cf2e561c548f22c9fb31163.asciidoc new file mode 100644 index 000000000..b2677e6bc --- /dev/null +++ b/docs/doc_examples/0280247e0cf2e561c548f22c9fb31163.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateToken({ + username: "myuser", +}); +console.log(response); +---- diff --git a/docs/doc_examples/02853293a5b7cd9cc7a886eb413bbeb6.asciidoc b/docs/doc_examples/02853293a5b7cd9cc7a886eb413bbeb6.asciidoc new file mode 100644 index 000000000..f6abfb4c5 --- /dev/null +++ b/docs/doc_examples/02853293a5b7cd9cc7a886eb413bbeb6.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + char_filter: [ + { + type: "mapping", + mappings: [ + "٠ => 0", + "١ => 1", + "٢ => 2", + "٣ => 3", + "٤ => 4", + "٥ => 5", + "٦ => 6", + "٧ => 7", + "٨ => 8", + "٩ => 9", + ], + }, + ], + text: "My license plate is ٢٥٠١٥", +}); +console.log(response); +---- diff --git a/docs/doc_examples/028f6d6ac2594e20b78b8a8f8cbad49d.asciidoc 
b/docs/doc_examples/028f6d6ac2594e20b78b8a8f8cbad49d.asciidoc deleted file mode 100644 index d5e9d1d71..000000000 --- a/docs/doc_examples/028f6d6ac2594e20b78b8a8f8cbad49d.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - countries: { - terms: { - field: 'artist.country', - order: [ - { - 'rock>playback_stats.avg': 'desc' - }, - { - _count: 'desc' - } - ] - }, - aggs: { - rock: { - filter: { - term: { - genre: 'rock' - } - }, - aggs: { - playback_stats: { - stats: { - field: 'play_count' - } - } - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/029de2f5383a42e1ac4ca1565bd2a130.asciidoc b/docs/doc_examples/029de2f5383a42e1ac4ca1565bd2a130.asciidoc new file mode 100644 index 000000000..18aa9dece --- /dev/null +++ b/docs/doc_examples/029de2f5383a42e1ac4ca1565bd2a130.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + full_name: { + type: "text", + index_prefixes: { + min_chars: 1, + max_chars: 10, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/02b00f21e9d23d82276ace0dd154d779.asciidoc b/docs/doc_examples/02b00f21e9d23d82276ace0dd154d779.asciidoc new file mode 100644 index 000000000..3619352ce --- /dev/null +++ b/docs/doc_examples/02b00f21e9d23d82276ace0dd154d779.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + routing: "user1,user2", + query: { + match: { + title: "document", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/02b6aa3e5652839f03de3a655854b897.asciidoc b/docs/doc_examples/02b6aa3e5652839f03de3a655854b897.asciidoc new file mode 100644 index 000000000..fad61ac26 --- /dev/null +++ b/docs/doc_examples/02b6aa3e5652839f03de3a655854b897.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/02c48d461536709c3fc8a0e8147c3787.asciidoc b/docs/doc_examples/02c48d461536709c3fc8a0e8147c3787.asciidoc new file mode 100644 index 000000000..dec8449bb --- /dev/null +++ b/docs/doc_examples/02c48d461536709c3fc8a0e8147c3787.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "pipelineB", + description: "outer pipeline", + processors: [ + { + pipeline: { + name: "pipelineA", + }, + }, + { + set: { + field: "outer_pipeline_set", + value: "outer", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc b/docs/doc_examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc new file mode 100644 index 000000000..ab8ea60ad --- /dev/null +++ b/docs/doc_examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc @@ -0,0 +1,10 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/02fad6b80bb29c2a7e6840db2fc67b18.asciidoc b/docs/doc_examples/02fad6b80bb29c2a7e6840db2fc67b18.asciidoc new file mode 100644 index 000000000..e5501afe9 --- /dev/null +++ b/docs/doc_examples/02fad6b80bb29c2a7e6840db2fc67b18.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_wildcard: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_wildcard: "This string can be quite lengthy", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + wildcard: { + my_wildcard: { + value: "*quite*lengthy", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/0308cbd85281f95fc458042afe3f587d.asciidoc b/docs/doc_examples/0308cbd85281f95fc458042afe3f587d.asciidoc new file mode 100644 index 000000000..07249ec05 --- /dev/null +++ b/docs/doc_examples/0308cbd85281f95fc458042afe3f587d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 0, + _source: "*.id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/032eac56b798bea29390e102538f4a26.asciidoc b/docs/doc_examples/032eac56b798bea29390e102538f4a26.asciidoc new file mode 100644 index 000000000..936e558a2 --- /dev/null +++ b/docs/doc_examples/032eac56b798bea29390e102538f4a26.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/033778305d52746f5ce0a2a922c8e521.asciidoc b/docs/doc_examples/033778305d52746f5ce0a2a922c8e521.asciidoc deleted file mode 100644 index 0bfc9f6e1..000000000 --- a/docs/doc_examples/033778305d52746f5ce0a2a922c8e521.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - script: { - source: "doc['genre'].value", - lang: 'painless' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/033838729cfb5d1a28d04f69ee78d924.asciidoc b/docs/doc_examples/033838729cfb5d1a28d04f69ee78d924.asciidoc new file mode 100644 index 000000000..1f4149800 --- /dev/null +++ b/docs/doc_examples/033838729cfb5d1a28d04f69ee78d924.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "Polygon", + orientation: "LEFT", + coordinates: [ + [ + [-177, 10], + 
[176, 15], + [172, 0], + [176, -15], + [-177, -10], + [-177, 10], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0350410d11579f4e876c798ce1eaef5b.asciidoc b/docs/doc_examples/0350410d11579f4e876c798ce1eaef5b.asciidoc new file mode 100644 index 000000000..39af7ac7a --- /dev/null +++ b/docs/doc_examples/0350410d11579f4e876c798ce1eaef5b.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 5, + refresh: "true", + document: { + query: { + bool: { + should: [ + { + match: { + message: { + query: "Japanese art", + _name: "query1", + }, + }, + }, + { + match: { + message: { + query: "Holand culture", + _name: "query2", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0350ff5ebb8207c004eb771088339cb4.asciidoc b/docs/doc_examples/0350ff5ebb8207c004eb771088339cb4.asciidoc new file mode 100644 index 000000000..4962104be --- /dev/null +++ b/docs/doc_examples/0350ff5ebb8207c004eb771088339cb4.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + term: { + text: "blue shoes sale", + }, + }, + }, + }, + { + standard: { + query: { + sparse_vector: { + field: "ml.tokens", + inference_id: "my_elser_model", + query: "What blue shoes are on sale?", + }, + }, + }, + }, + ], + rank_window_size: 50, + rank_constant: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/03582fc93683e573062bcfda45e01d69.asciidoc b/docs/doc_examples/03582fc93683e573062bcfda45e01d69.asciidoc new file mode 100644 index 000000000..acf03f1b5 --- /dev/null +++ b/docs/doc_examples/03582fc93683e573062bcfda45e01d69.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_analyzer: { + type: "custom", + tokenizer: "standard", + char_filter: ["html_strip"], + filter: ["lowercase", "asciifolding"], + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_custom_analyzer", + text: "Is this déjà vu?", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/035a7a919eb6513b4769a3727b7d6447.asciidoc b/docs/doc_examples/035a7a919eb6513b4769a3727b7d6447.asciidoc new file mode 100644 index 000000000..4c4a3671a --- /dev/null +++ b/docs/doc_examples/035a7a919eb6513b4769a3727b7d6447.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "whitespace", + text: "The quick brown fox.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/03891265df2111a38e0b6b24c1b967e1.asciidoc b/docs/doc_examples/03891265df2111a38e0b6b24c1b967e1.asciidoc new file mode 100644 index 000000000..d0da136f7 --- /dev/null +++ b/docs/doc_examples/03891265df2111a38e0b6b24c1b967e1.asciidoc @@ -0,0 +1,8 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getServiceAccounts(); +console.log(response); +---- diff --git a/docs/doc_examples/03b1d76fa0b773d5b7d74ecb7e1e1a80.asciidoc b/docs/doc_examples/03b1d76fa0b773d5b7d74ecb7e1e1a80.asciidoc new file mode 100644 index 000000000..23fad0184 --- /dev/null +++ b/docs/doc_examples/03b1d76fa0b773d5b7d74ecb7e1e1a80.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", + indices: "my-index,logs-my_app-default", + rename_pattern: "(.+)", + rename_replacement: "restored-$1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc b/docs/doc_examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc new file mode 100644 index 000000000..7e6d9a983 --- /dev/null +++ b/docs/doc_examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library ORDER BY page_count DESC LIMIT 5", +}); +console.log(response); +---- diff --git a/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc b/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc new file mode 100644 index 000000000..fbefc580b --- /dev/null +++ b/docs/doc_examples/04412d11783dac25b5fd2ec5407078a3.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateApiKeyId({ + connector_id: "my-connector", + api_key_id: "my-api-key-id", + api_key_secret_id: "my-connector-secret-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/044b2f99e7438e408685b258db17f863.asciidoc b/docs/doc_examples/044b2f99e7438e408685b258db17f863.asciidoc new file mode 100644 index 000000000..577720dbb --- /dev/null +++ b/docs/doc_examples/044b2f99e7438e408685b258db17f863.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: '\n process where process.name == "regsvr32.exe"\n ', + size: 50, +}); +console.log(response); +---- diff --git a/docs/doc_examples/046b2249bbc49e77848c114cee940f17.asciidoc b/docs/doc_examples/046b2249bbc49e77848c114cee940f17.asciidoc new file mode 100644 index 000000000..7541cfe0d --- /dev/null +++ b/docs/doc_examples/046b2249bbc49e77848c114cee940f17.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + multi_match: { + query: "How is the weather in Jamaica?", + fields: ["title", "description"], + }, + }, + }, + }, + { + standard: { + query: { + text_expansion: { + "ml.inference.title_expanded.predicted_value": { + model_id: ".elser_model_2", + model_text: 
"How is the weather in Jamaica?", + }, + }, + }, + }, + }, + { + standard: { + query: { + text_expansion: { + "ml.inference.description_expanded.predicted_value": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + }, + }, + }, + }, + }, + ], + window_size: 10, + rank_constant: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0470d7101637568b9d3d1239f06325a7.asciidoc b/docs/doc_examples/0470d7101637568b9d3d1239f06325a7.asciidoc new file mode 100644 index 000000000..0e0773a12 --- /dev/null +++ b/docs/doc_examples/0470d7101637568b9d3d1239f06325a7.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_internal/desired_nodes/<history_id>/<version>", + body: { + nodes: [ + { + settings: { + "node.name": "instance-000187", + "node.external_id": "instance-000187", + "node.roles": ["data_hot", "master"], + "node.attr.data": "hot", + "node.attr.logical_availability_zone": "zone-0", + }, + processors: 8, + memory: "58gb", + storage: "2tb", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/047266b0d20fdb62ebc72d51952c8f6d.asciidoc b/docs/doc_examples/047266b0d20fdb62ebc72d51952c8f6d.asciidoc index ae2d02e7e..6358e9f97 100644 --- a/docs/doc_examples/047266b0d20fdb62ebc72d51952c8f6d.asciidoc +++ b/docs/doc_examples/047266b0d20fdb62ebc72d51952c8f6d.asciidoc @@ -4,20 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'Will Smith', - type: 'cross_fields', - fields: [ - 'first_name', - 'last_name' - ], - operator: 'and' - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "Will Smith", + type: "cross_fields", + fields: ["first_name", "last_name"], + operator: "and", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc b/docs/doc_examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc new file mode 100644 index 000000000..9ab89027c --- /dev/null +++ b/docs/doc_examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.resetTransform({ + transform_id: "ecommerce_transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/048d8abd42d094bbdcf4452a58ccb35b.asciidoc b/docs/doc_examples/048d8abd42d094bbdcf4452a58ccb35b.asciidoc deleted file mode 100644 index 42b87e0bc..000000000 --- a/docs/doc_examples/048d8abd42d094bbdcf4452a58ccb35b.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.create({ - index: 'twitter', - id: '1', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/04d586a536061ec1045d0bb2dc3d1a5f.asciidoc b/docs/doc_examples/04d586a536061ec1045d0bb2dc3d1a5f.asciidoc new file mode 100644 index 000000000..ffb79faa8 --- /dev/null +++ b/docs/doc_examples/04d586a536061ec1045d0bb2dc3d1a5f.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "set_os", + description: "sets the value of host.os.name from the field os", + processors: [ + { + set: { + field: "host.os.name", + value: "{{{os}}}", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.ingest.simulate({ + id: "set_os", + docs: [ + { + _source: { + os: "Ubuntu", + }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc b/docs/doc_examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc new file mode 100644 index 000000000..5496c5a44 --- /dev/null +++ b/docs/doc_examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline-id", + description: "My optional pipeline description", + processors: [ + { + set: { + description: "My optional processor description", + field: "my-keyword-field", + value: "foo", + }, + }, + ], + _meta: { + reason: "set my-keyword-field to foo", + serialization: { + class: "MyPipeline", + id: 10, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc b/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc new file mode 100644 index 000000000..00ef08a92 --- /dev/null +++ b/docs/doc_examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "google_vertex_ai_embeddings", + inference_config: { + service: "googlevertexai", + service_settings: { + service_account_json: "", + model_id: "", + location: "", + project_id: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc b/docs/doc_examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc index d7a06bce5..2622f6cef 100644 --- a/docs/doc_examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc +++ b/docs/doc_examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.cluster.health({ - wait_for_status: 'yellow', - timeout: '50s' -}) -console.log(response) + wait_for_status: "yellow", + timeout: "50s", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/04fe1e3a0047b0cdb10987b79fc3f3f3.asciidoc b/docs/doc_examples/04fe1e3a0047b0cdb10987b79fc3f3f3.asciidoc deleted file mode 100644 index 12bf2c8fa..000000000 --- a/docs/doc_examples/04fe1e3a0047b0cdb10987b79fc3f3f3.asciidoc +++ /dev/null @@ -1,30 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - term: { - user: 'kimchy' - } - }, - sort: { - _script: { - type: 'number', - script: { - lang: 'painless', - source: "doc['field_name'].value * params.factor", - params: { - factor: 1.1 - } - }, - order: 'asc' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0502284d4685c478eb68761f979f4303.asciidoc b/docs/doc_examples/0502284d4685c478eb68761f979f4303.asciidoc new file mode 100644 index 
000000000..35597e672 --- /dev/null +++ b/docs/doc_examples/0502284d4685c478eb68761f979f4303.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "house_price_predictions", + query: { + bool: { + filter: [ + { + term: { + "ml.is_training": false, + }, + }, + ], + }, + }, + evaluation: { + regression: { + actual_field: "price", + predicted_field: "ml.price_prediction", + metrics: { + r_squared: {}, + mse: {}, + msle: { + offset: 10, + }, + huber: { + delta: 1.5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/050b3947025fee403232b8e6e9112dab.asciidoc b/docs/doc_examples/050b3947025fee403232b8e6e9112dab.asciidoc new file mode 100644 index 000000000..7e17a91ea --- /dev/null +++ b/docs/doc_examples/050b3947025fee403232b8e6e9112dab.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "yaml", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/05148cc541f447486d9daf15ab77292b.asciidoc b/docs/doc_examples/05148cc541f447486d9daf15ab77292b.asciidoc new file mode 100644 index 000000000..cebe6e3bd --- /dev/null +++ b/docs/doc_examples/05148cc541f447486d9daf15ab77292b.asciidoc @@ -0,0 +1,54 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "logs", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + }, + }, + warm: { + min_age: "30d", + actions: { + shrink: { + number_of_shards: 1, + }, + forcemerge: { + max_num_segments: 1, + }, + }, + }, + cold: { + min_age: "60d", + actions: { + searchable_snapshot: { + snapshot_repository: "found-snapshots", + }, + }, + }, + frozen: { + min_age: "90d", + actions: { + searchable_snapshot: { + snapshot_repository: "found-snapshots", + }, + }, + }, + delete: { + min_age: "735d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0518c673094fb18ecb491a3b78af4695.asciidoc b/docs/doc_examples/0518c673094fb18ecb491a3b78af4695.asciidoc new file mode 100644 index 000000000..1b3f88827 --- /dev/null +++ b/docs/doc_examples/0518c673094fb18ecb491a3b78af4695.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + allocate: { + include: { + box_type: "hot,warm", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/05284c8ea91769c09c8db47db8a6629a.asciidoc b/docs/doc_examples/05284c8ea91769c09c8db47db8a6629a.asciidoc new file mode 100644 index 000000000..1017e8824 --- /dev/null +++ b/docs/doc_examples/05284c8ea91769c09c8db47db8a6629a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.repositories({ + v: "true", +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/053497b6960f80fd7b005b7c6d54358f.asciidoc b/docs/doc_examples/053497b6960f80fd7b005b7c6d54358f.asciidoc new file mode 100644 index 000000000..5a4d4ee7a --- /dev/null +++ b/docs/doc_examples/053497b6960f80fd7b005b7c6d54358f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + delete: { + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/05500e77aef581d92f6c605f7a48f7df.asciidoc b/docs/doc_examples/05500e77aef581d92f6c605f7a48f7df.asciidoc new file mode 100644 index 000000000..80f74e73e --- /dev/null +++ b/docs/doc_examples/05500e77aef581d92f6c605f7a48f7df.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "polygon", + coordinates: [ + [ + [1000, -1001], + [1001, -1001], + [1001, -1000], + [1000, -1000], + [1000, -1001], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/059e04aaf093379401f665c33ac796dc.asciidoc b/docs/doc_examples/059e04aaf093379401f665c33ac796dc.asciidoc new file mode 100644 index 000000000..44a0e488d --- /dev/null +++ b/docs/doc_examples/059e04aaf093379401f665c33ac796dc.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "keyword_marker", + keywords: ["jumping"], + }, + "stemmer", + ], + text: "fox running and jumping", + explain: true, + attributes: "keyword", +}); +console.log(response); +---- diff --git a/docs/doc_examples/05a09078fe1016e900e445ad4039cf97.asciidoc b/docs/doc_examples/05a09078fe1016e900e445ad4039cf97.asciidoc new file mode 100644 index 000000000..791b13f13 --- /dev/null +++ b/docs/doc_examples/05a09078fe1016e900e445ad4039cf97.asciidoc @@ -0,0 +1,78 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "clientips", + mappings: { + properties: { + client_ip: { + type: "keyword", + }, + env: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "clientips", + operations: [ + { + index: {}, + }, + { + client_ip: "172.21.0.5", + env: "Development", + }, + { + index: {}, + }, + { + client_ip: "172.21.2.113", + env: "QA", + }, + { + index: {}, + }, + { + client_ip: "172.21.2.162", + env: "QA", + }, + { + index: {}, + }, + { + client_ip: "172.21.3.15", + env: "Production", + }, + { + index: {}, + }, + { + client_ip: "172.21.3.16", + env: "Production", + }, + ], +}); +console.log(response1); + +const response2 = await client.enrich.putPolicy({ + name: "clientip_policy", + match: { + indices: "clientips", + match_field: "client_ip", + enrich_fields: ["env"], + }, +}); +console.log(response2); + +const response3 = await client.enrich.executePolicy({ + name: "clientip_policy", + wait_for_completion: "false", +}); +console.log(response3); +---- diff --git 
a/docs/doc_examples/05ba0fdd0215e313ecea8a2f8f5a43b4.asciidoc b/docs/doc_examples/05ba0fdd0215e313ecea8a2f8f5a43b4.asciidoc new file mode 100644 index 000000000..cdd215727 --- /dev/null +++ b/docs/doc_examples/05ba0fdd0215e313ecea8a2f8f5a43b4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/05bee3adf46b9d6a2fef96c51bf958da.asciidoc b/docs/doc_examples/05bee3adf46b9d6a2fef96c51bf958da.asciidoc new file mode 100644 index 000000000..39c360520 --- /dev/null +++ b/docs/doc_examples/05bee3adf46b9d6a2fef96c51bf958da.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "click_role", + indices: [ + { + names: ["events-*"], + privileges: ["read"], + query: { + match: { + category: "click", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc b/docs/doc_examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc new file mode 100644 index 000000000..1b9967b11 --- /dev/null +++ b/docs/doc_examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "alibabacloud_ai_search_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "alibabacloud_ai_search_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/05f4a4b284f68f7fb13603d7cd854083.asciidoc b/docs/doc_examples/05f4a4b284f68f7fb13603d7cd854083.asciidoc new file mode 100644 index 000000000..d80e2c6ab --- /dev/null +++ b/docs/doc_examples/05f4a4b284f68f7fb13603d7cd854083.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "logs-my_app-default", + settings: { + index: { + lifecycle: { + name: "new-lifecycle-policy", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc b/docs/doc_examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc new file mode 100644 index 000000000..ceb726560 --- /dev/null +++ b/docs/doc_examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ssl.certificates(); +console.log(response); +---- diff --git a/docs/doc_examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc b/docs/doc_examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc new file mode 100644 index 000000000..709f0fadb --- /dev/null +++ b/docs/doc_examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.status({ + 
repository: "my_repository", + snapshot: "snapshot_2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/060a56477e39f272fc5a9cfe47443cf1.asciidoc b/docs/doc_examples/060a56477e39f272fc5a9cfe47443cf1.asciidoc new file mode 100644 index 000000000..7f41907ea --- /dev/null +++ b/docs/doc_examples/060a56477e39f272fc5a9cfe47443cf1.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "simple_pattern", + pattern: "[0123456789]{3}", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "fd-786-335-514-x", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0620a10ff15a2bb3eb489afc24ff0131.asciidoc b/docs/doc_examples/0620a10ff15a2bb3eb489afc24ff0131.asciidoc new file mode 100644 index 000000000..6d337053c --- /dev/null +++ b/docs/doc_examples/0620a10ff15a2bb3eb489afc24ff0131.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: "surprise_me", +}); +console.log(response); +---- diff --git a/docs/doc_examples/06454a8e85e2d3479c90390bb955eb39.asciidoc b/docs/doc_examples/06454a8e85e2d3479c90390bb955eb39.asciidoc new file mode 100644 index 000000000..5c103b7f9 --- /dev/null +++ b/docs/doc_examples/06454a8e85e2d3479c90390bb955eb39.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "snapshot*,-snapshot_3", + sort: "name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc b/docs/doc_examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc new file mode 100644 index 000000000..ebf376463 --- /dev/null +++ b/docs/doc_examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-alias", + conditions: { + max_age: "7d", + max_docs: 1000, + max_primary_shard_size: "50gb", + max_primary_shard_docs: "2000", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/069030e5f43d8f8ce3e3eca40205027e.asciidoc b/docs/doc_examples/069030e5f43d8f8ce3e3eca40205027e.asciidoc new file mode 100644 index 000000000..083a9bb5a --- /dev/null +++ b/docs/doc_examples/069030e5f43d8f8ce3e3eca40205027e.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + manager: { + properties: { + age: { + type: "integer", + }, + name: { + type: "text", + }, + }, + }, + employees: { + type: "nested", + properties: { + age: { + type: "integer", + }, + name: { + type: "text", + }, + }, + }, + 
}, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + region: "US", + manager: { + name: "Alice White", + age: 30, + }, + employees: [ + { + name: "John Smith", + age: 34, + }, + { + name: "Peter Brown", + age: 26, + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/06a761823a694850a6efe5d5bf61478c.asciidoc b/docs/doc_examples/06a761823a694850a6efe5d5bf61478c.asciidoc new file mode 100644 index 000000000..e5aaf6495 --- /dev/null +++ b/docs/doc_examples/06a761823a694850a6efe5d5bf61478c.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.putPolicy({ + name: "users-policy", + match: { + indices: "users", + match_field: "email", + enrich_fields: ["first_name", "last_name", "city", "zip", "state"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/06afce2955f9094d96d27067ebca32e8.asciidoc b/docs/doc_examples/06afce2955f9094d96d27067ebca32e8.asciidoc deleted file mode 100644 index b60b6820d..000000000 --- a/docs/doc_examples/06afce2955f9094d96d27067ebca32e8.asciidoc +++ /dev/null @@ -1,48 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - bool: { - must: { - term: { - user: 'kimchy' - } - }, - filter: { - term: { - tag: 'tech' - } - }, - must_not: { - range: { - age: { - gte: 10, - lte: 20 - } - } - }, - should: [ - { - term: { - tag: 'wow' - } - }, - { - term: { - tag: 'elasticsearch' - } - } - ], - minimum_should_match: 1, - boost: 1 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc b/docs/doc_examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc new file mode 100644 index 000000000..57334617a --- /dev/null +++ b/docs/doc_examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.msearch({ + index: "my-index-000001", + searches: [ + {}, + { + query: { + match: { + message: "this is a test", + }, + }, + }, + { + index: "my-index-000002", + }, + { + query: { + match_all: {}, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/06c0db0f42223761e32fa418066b275f.asciidoc b/docs/doc_examples/06c0db0f42223761e32fa418066b275f.asciidoc new file mode 100644 index 000000000..2ee03c83d --- /dev/null +++ b/docs/doc_examples/06c0db0f42223761e32fa418066b275f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my-repo", + repository: { + type: "s3", + settings: { + bucket: "repo-bucket", + client: "elastic-internal-71bcd3", + base_path: "myrepo", + readonly: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/06d65e3505dcb306977185e8545cf4a8.asciidoc b/docs/doc_examples/06d65e3505dcb306977185e8545cf4a8.asciidoc new file mode 100644 index 000000000..c4044aad9 --- /dev/null +++ b/docs/doc_examples/06d65e3505dcb306977185e8545cf4a8.asciidoc @@ -0,0 +1,12 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.total_shards_per_node": 400, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0709a38613d2de90d418ce12b36af30e.asciidoc b/docs/doc_examples/0709a38613d2de90d418ce12b36af30e.asciidoc new file mode 100644 index 000000000..dfe9d6be5 --- /dev/null +++ b/docs/doc_examples/0709a38613d2de90d418ce12b36af30e.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.reroute(); +console.log(response); +---- diff --git a/docs/doc_examples/070cf72783cfe534a04f2f64e4016052.asciidoc b/docs/doc_examples/070cf72783cfe534a04f2f64e4016052.asciidoc new file mode 100644 index 000000000..9bde2346e --- /dev/null +++ b/docs/doc_examples/070cf72783cfe534a04f2f64e4016052.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + subobjects: false, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "metric_1", + document: { + time: "100ms", + "time.min": "10ms", + "time.max": "900ms", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0718a0b4f4905a8c90c1ff93de557e56.asciidoc b/docs/doc_examples/0718a0b4f4905a8c90c1ff93de557e56.asciidoc new file mode 100644 index 000000000..260c6a967 --- /dev/null +++ b/docs/doc_examples/0718a0b4f4905a8c90c1ff93de557e56.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grades_stats: { + extended_stats: { + field: "grade", + sigma: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc b/docs/doc_examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc new file mode 100644 index 000000000..c292cfd78 --- /dev/null +++ b/docs/doc_examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.activateUserProfile({ + grant_type: "password", + username: "jacknich", + password: "l0ng-r4nd0m-p@ssw0rd", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc new file mode 100644 index 000000000..84abd3971 --- /dev/null +++ b/docs/doc_examples/0722b302b2b3275a988d858044f99d5d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "kibana_sample_data_ecommerce", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0737ebaea33631f001fb3f4226948492.asciidoc b/docs/doc_examples/0737ebaea33631f001fb3f4226948492.asciidoc new file mode 100644 index 000000000..99f666660 --- /dev/null 
+++ b/docs/doc_examples/0737ebaea33631f001fb3f4226948492.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_ip_locations", + mappings: { + properties: { + geoip: { + properties: { + location: { + type: "geo_point", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc b/docs/doc_examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc new file mode 100644 index 000000000..e1d337be8 --- /dev/null +++ b/docs/doc_examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRealms({ + realms: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc b/docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc new file mode 100644 index 000000000..9d98d539a --- /dev/null +++ b/docs/doc_examples/074e4602d1ca54412380a40867d078bc.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + "index.indexing.slowlog.threshold.index.warn": "10s", + "index.indexing.slowlog.threshold.index.info": "5s", + "index.indexing.slowlog.threshold.index.debug": "2s", + "index.indexing.slowlog.threshold.index.trace": "500ms", + "index.indexing.slowlog.source": "1000", + "index.indexing.slowlog.reformat": true, + "index.indexing.slowlog.include.user": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc b/docs/doc_examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc new file mode 100644 index 000000000..23b0f56fe --- /dev/null +++ b/docs/doc_examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.getTransformStats({ + transform_id: "ecommerce-customer-transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc b/docs/doc_examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc new file mode 100644 index 000000000..3580265c3 --- /dev/null +++ b/docs/doc_examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.stopJob({ + id: "sensor", + wait_for_completion: "true", + timeout: "10s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc b/docs/doc_examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc new file mode 100644 index 000000000..eb7e9575d --- /dev/null +++ b/docs/doc_examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + max_docs: 10, + source: { + index: 
"my-index-000001", + query: { + function_score: { + random_score: {}, + min_score: 0.9, + }, + }, + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc b/docs/doc_examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc new file mode 100644 index 000000000..e70d7e96c --- /dev/null +++ b/docs/doc_examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.forgetFollower({ + index: "leader_index", + follower_cluster: "follower_cluster", + follower_index: "follower_index", + follower_index_uuid: "vYpnaWPRQB6mNspmoCeYyA", + leader_remote_cluster: "leader_cluster", +}); +console.log(response); +---- diff --git a/docs/doc_examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc b/docs/doc_examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc new file mode 100644 index 000000000..e69d25470 --- /dev/null +++ b/docs/doc_examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + semantic: { + field: "inference_field", + query: "Best surfing places", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/07de76cb0e7f11c7533788faf8c093c3.asciidoc b/docs/doc_examples/07de76cb0e7f11c7533788faf8c093c3.asciidoc new file mode 100644 index 000000000..66dacc74e --- /dev/null +++ b/docs/doc_examples/07de76cb0e7f11c7533788faf8c093c3.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + title: { + type: "text", + }, + labels: { + type: "flattened", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/07ec38b97601286ec106986a84e1e5f7.asciidoc b/docs/doc_examples/07ec38b97601286ec106986a84e1e5f7.asciidoc new file mode 100644 index 000000000..b473ddcdc --- /dev/null +++ b/docs/doc_examples/07ec38b97601286ec106986a84e1e5f7.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "job-candidates", + mappings: { + properties: { + name: { + type: "keyword", + }, + programming_languages: { + type: "keyword", + }, + required_matches: { + type: "long", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/080c34d8151d02b760571e3a2899fa97.asciidoc b/docs/doc_examples/080c34d8151d02b760571e3a2899fa97.asciidoc new file mode 100644 index 000000000..0c931b62f --- /dev/null +++ b/docs/doc_examples/080c34d8151d02b760571e3a2899fa97.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + settings: { + analysis: { + filter: { + email: { + type: "pattern_capture", + preserve_original: true, + patterns: ["([^@]+)", "(\\p{L}+)", "(\\d+)", "@(.+)"], + }, + }, + analyzer: { + email: { + 
tokenizer: "uax_url_email", + filter: ["email", "lowercase", "unique"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc b/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc new file mode 100644 index 000000000..269067032 --- /dev/null +++ b/docs/doc_examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-bit", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + element_type: "bit", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-rank-vectors-bit", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + my_vector: [127, -127, 0, 1, 42], + }, + { + index: { + _id: "2", + }, + }, + { + my_vector: "8100012a7f", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc b/docs/doc_examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc new file mode 100644 index 000000000..16eda0fcc --- /dev/null +++ b/docs/doc_examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "my-e5-model", + inference_config: { + service: "elasticsearch", + service_settings: { + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 3, + max_number_of_allocations: 10, + }, + num_threads: 1, + model_id: ".multilingual-e5-small", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/083e514297c09e91211f0d168aef1b0b.asciidoc b/docs/doc_examples/083e514297c09e91211f0d168aef1b0b.asciidoc new file mode 100644 index 000000000..744d438b3 --- /dev/null +++ b/docs/doc_examples/083e514297c09e91211f0d168aef1b0b.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "logs-generic-default", + query: { + match: { + "event.sequence": "97", + }, + }, + script: { + source: "ctx._source.event.original = params.new_event", + lang: "painless", + params: { + new_event: "FOOBAR", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/086ec4c5d86bbf80fb80162e94037689.asciidoc b/docs/doc_examples/086ec4c5d86bbf80fb80162e94037689.asciidoc new file mode 100644 index 000000000..7508b3af3 --- /dev/null +++ b/docs/doc_examples/086ec4c5d86bbf80fb80162e94037689.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + weighted_tokens: { + query_expansion_field: { + tokens: { + "2161": 0.4679, + "2621": 0.307, + "2782": 0.1299, + "2851": 0.1056, + "3088": 0.3041, + "3376": 0.1038, + "3467": 0.4873, + "3684": 0.8958, + "4380": 0.334, + "4542": 0.4636, + "4633": 2.2805, + "4785": 1.2628, + "4860": 1.0655, + "5133": 1.0709, + "7139": 1.0016, + "7224": 0.2486, + "7387": 0.0985, + "7394": 0.0542, + "8915": 0.369, + "9156": 2.8947, + "10505": 0.2771, + "11464": 
0.3996, + "13525": 0.0088, + "14178": 0.8161, + "16893": 0.1376, + "17851": 1.5348, + "19939": 0.6012, + }, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: false, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0881397074d261ccc2db514daf116c31.asciidoc b/docs/doc_examples/0881397074d261ccc2db514daf116c31.asciidoc new file mode 100644 index 000000000..c15090e30 --- /dev/null +++ b/docs/doc_examples/0881397074d261ccc2db514daf116c31.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", + with_limited_by: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/08a76b3f5a8394d8f9084113334a260a.asciidoc b/docs/doc_examples/08a76b3f5a8394d8f9084113334a260a.asciidoc new file mode 100644 index 000000000..d10141ebc --- /dev/null +++ b/docs/doc_examples/08a76b3f5a8394d8f9084113334a260a.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_boxplot: { + boxplot: { + field: "load_time", + compression: 200, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/08c9af9dd519c011deedd406f3061836.asciidoc b/docs/doc_examples/08c9af9dd519c011deedd406f3061836.asciidoc new file mode 100644 index 000000000..0707e105c --- /dev/null +++ b/docs/doc_examples/08c9af9dd519c011deedd406f3061836.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.previewDatafeed({ + datafeed_config: { + indices: ["kibana_sample_data_ecommerce"], + query: { + bool: { + filter: [ + { + term: { + _index: "kibana_sample_data_ecommerce", + }, + }, + ], + }, + }, + scroll_size: 1000, + }, + job_config: { + description: "Find customers spending an unusually high amount in an hour", + analysis_config: { + bucket_span: "1h", + detectors: [ + { + detector_description: "High total sales", + function: "high_sum", + field_name: "taxful_total_price", + over_field_name: "customer_full_name.keyword", + }, + ], + influencers: ["customer_full_name.keyword", "category.keyword"], + }, + analysis_limits: { + model_memory_limit: "10mb", + }, + data_description: { + time_field: "order_date", + time_format: "epoch_ms", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/08e08feb514b24006e13f258d617d873.asciidoc b/docs/doc_examples/08e08feb514b24006e13f258d617d873.asciidoc new file mode 100644 index 000000000..ad2134f5b --- /dev/null +++ b/docs/doc_examples/08e08feb514b24006e13f258d617d873.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getScript({ + id: "calculate-score", +}); +console.log(response); +---- diff --git a/docs/doc_examples/08e79ca9fdcdfebb2c6a79e6837e649d.asciidoc b/docs/doc_examples/08e79ca9fdcdfebb2c6a79e6837e649d.asciidoc new file mode 100644 index 000000000..ab12f4a4c --- /dev/null +++ b/docs/doc_examples/08e79ca9fdcdfebb2c6a79e6837e649d.asciidoc @@ -0,0 +1,19 @@ +// This 
file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + tag_cardinality: { + cardinality: { + field: "tag", + missing: "N/A", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/08f20902821a4f7a73ce7b959c5bdbdc.asciidoc b/docs/doc_examples/08f20902821a4f7a73ce7b959c5bdbdc.asciidoc new file mode 100644 index 000000000..5c797b641 --- /dev/null +++ b/docs/doc_examples/08f20902821a4f7a73ce7b959c5bdbdc.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + regexp: { + "user.id": { + value: "k.*y", + flags: "ALL", + case_insensitive: true, + max_determinized_states: 10000, + rewrite: "constant_score_blended", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/091200b658023db31dffc2f08a85a9cc.asciidoc b/docs/doc_examples/091200b658023db31dffc2f08a85a9cc.asciidoc new file mode 100644 index 000000000..6c0f24be4 --- /dev/null +++ b/docs/doc_examples/091200b658023db31dffc2f08a85a9cc.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + "routing.allocation.total_shards_per_node": -1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0957bbd535f58c97b12ffba90813d64c.asciidoc b/docs/doc_examples/0957bbd535f58c97b12ffba90813d64c.asciidoc new file mode 100644 index 000000000..f21197ae4 --- /dev/null +++ b/docs/doc_examples/0957bbd535f58c97b12ffba90813d64c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "analyze_sample", + settings: { + "index.analyze.max_token_count": 20000, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/095d60b2cfc5004c97efc49f27287262.asciidoc b/docs/doc_examples/095d60b2cfc5004c97efc49f27287262.asciidoc new file mode 100644 index 000000000..2b125347a --- /dev/null +++ b/docs/doc_examples/095d60b2cfc5004c97efc49f27287262.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + fixed_interval: "30d", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/095e3f21941a9cc75f398389a075152d.asciidoc b/docs/doc_examples/095e3f21941a9cc75f398389a075152d.asciidoc new file mode 100644 index 000000000..f88e0adc9 --- /dev/null +++ b/docs/doc_examples/095e3f21941a9cc75f398389a075152d.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "cross-encoder__ms-marco-tinybert-l-2-v2", + docs: [ + { + text_field: + "Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers.", + }, + { 
+ text_field: "New York City is famous for the Metropolitan Museum of Art.", + }, + ], + inference_config: { + text_similarity: { + text: "How many people live in Berlin?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09769561f082b50558fb7d8707719963.asciidoc b/docs/doc_examples/09769561f082b50558fb7d8707719963.asciidoc new file mode 100644 index 000000000..674ca8a30 --- /dev/null +++ b/docs/doc_examples/09769561f082b50558fb7d8707719963.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "ingest", + filter_path: "nodes.*.ingest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0989cc65d8924f666ce3eb0820d2d244.asciidoc b/docs/doc_examples/0989cc65d8924f666ce3eb0820d2d244.asciidoc deleted file mode 100644 index b7f079096..000000000 --- a/docs/doc_examples/0989cc65d8924f666ce3eb0820d2d244.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.index({ - index: 'users', - refresh: 'wait_for', - body: { - user_id: 12345 - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'users', - refresh: 'wait_for', - body: { - user_id: 12346 - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/099006ab11b52ea99693401dceee8bad.asciidoc b/docs/doc_examples/099006ab11b52ea99693401dceee8bad.asciidoc new file mode 100644 index 000000000..e9a963852 --- /dev/null +++ b/docs/doc_examples/099006ab11b52ea99693401dceee8bad.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "calculate-score", + script: { + lang: "painless", + source: "Math.log(_score * 2) + params['my_modifier']", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09944369863fd8666d5301d717317276.asciidoc b/docs/doc_examples/09944369863fd8666d5301d717317276.asciidoc new file mode 100644 index 000000000..9a6bf1366 --- /dev/null +++ b/docs/doc_examples/09944369863fd8666d5301d717317276.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "condition", + filter: ["lowercase"], + script: { + source: "token.getTerm().length() < 5", + }, + }, + ], + text: "THE QUICK BROWN FOX", +}); +console.log(response); +---- diff --git a/docs/doc_examples/09a44b619a99f6bf3f01bd5e258fd22d.asciidoc b/docs/doc_examples/09a44b619a99f6bf3f01bd5e258fd22d.asciidoc new file mode 100644 index 000000000..40a384942 --- /dev/null +++ b/docs/doc_examples/09a44b619a99f6bf3f01bd5e258fd22d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + text: "New York", +}); +console.log(response); +---- diff --git a/docs/doc_examples/09a478fe32a7b7d814083ffa5297bcdf.asciidoc b/docs/doc_examples/09a478fe32a7b7d814083ffa5297bcdf.asciidoc new file mode 100644 index 000000000..31140522e --- 
/dev/null +++ b/docs/doc_examples/09a478fe32a7b7d814083ffa5297bcdf.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + fuzzy: { + "user.id": { + value: "ki", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09bdf9a7e22733d668476724042a406c.asciidoc b/docs/doc_examples/09bdf9a7e22733d668476724042a406c.asciidoc new file mode 100644 index 000000000..74ff0e94b --- /dev/null +++ b/docs/doc_examples/09bdf9a7e22733d668476724042a406c.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "timeseries_template", + index_patterns: ["timeseries"], + data_stream: {}, + template: { + settings: { + number_of_shards: 1, + number_of_replicas: 1, + "index.lifecycle.name": "timeseries_policy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc b/docs/doc_examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc new file mode 100644 index 000000000..c5eca11d6 --- /dev/null +++ b/docs/doc_examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + rule: { + match_criteria: { + user_query: "pugs", + }, + ruleset_ids: ["my-ruleset"], + organic: { + match: { + description: "puggles", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09ce0ec993c494ac01f01ef9815fcc4b.asciidoc b/docs/doc_examples/09ce0ec993c494ac01f01ef9815fcc4b.asciidoc new file mode 100644 index 000000000..2d45c039f --- /dev/null +++ b/docs/doc_examples/09ce0ec993c494ac01f01ef9815fcc4b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index", + runtime: { + "http.clientip": { + type: "ip", + script: + "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n ", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/09d617863a103c82fb4101e6165ea7fe.asciidoc b/docs/doc_examples/09d617863a103c82fb4101e6165ea7fe.asciidoc index 26a1900e0..f8e0b44af 100644 --- a/docs/doc_examples/09d617863a103c82fb4101e6165ea7fe.asciidoc +++ b/docs/doc_examples/09d617863a103c82fb4101e6165ea7fe.asciidoc @@ -4,12 +4,9 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - match_all: {} - } - } -}) -console.log(response) + query: { + match_all: {}, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc b/docs/doc_examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc new file mode 100644 index 000000000..2c768dba6 --- /dev/null +++ b/docs/doc_examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "animal_classification", + 
evaluation: { + classification: { + actual_field: "animal_class", + metrics: { + auc_roc: { + class_name: "dog", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a3003fa5af850e415634b50b1029859.asciidoc b/docs/doc_examples/0a3003fa5af850e415634b50b1029859.asciidoc new file mode 100644 index 000000000..9d6394951 --- /dev/null +++ b/docs/doc_examples/0a3003fa5af850e415634b50b1029859.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "logs-generic-default*", + filter_path: "hits.hits._index", + query: { + match: { + "event.sequence": "97", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a3186bf20b5359393406fc0cb433313.asciidoc b/docs/doc_examples/0a3186bf20b5359393406fc0cb433313.asciidoc new file mode 100644 index 000000000..e534e77b4 --- /dev/null +++ b/docs/doc_examples/0a3186bf20b5359393406fc0cb433313.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, + columnar: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a46ac2968a574ce145f197f10d30152.asciidoc b/docs/doc_examples/0a46ac2968a574ce145f197f10d30152.asciidoc new file mode 100644 index 000000000..ebec6756f --- /dev/null +++ b/docs/doc_examples/0a46ac2968a574ce145f197f10d30152.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "library", + refresh: "true", + operations: [ + { + index: { + _id: "Leviathan Wakes", + }, + }, + { + name: "Leviathan Wakes", + author: "James S.A. 
Corey", + release_date: "2011-06-02", + page_count: 561, + }, + { + index: { + _id: "Hyperion", + }, + }, + { + name: "Hyperion", + author: "Dan Simmons", + release_date: "1989-05-26", + page_count: 482, + }, + { + index: { + _id: "Dune", + }, + }, + { + name: "Dune", + author: "Frank Herbert", + release_date: "1965-06-01", + page_count: 604, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc b/docs/doc_examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc new file mode 100644 index 000000000..497a5e10b --- /dev/null +++ b/docs/doc_examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "", + aliases: { + "my-alias": { + is_write_index: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a650401134f07e40216f0d0d1a66a32.asciidoc b/docs/doc_examples/0a650401134f07e40216f0d0d1a66a32.asciidoc new file mode 100644 index 000000000..68956e123 --- /dev/null +++ b/docs/doc_examples/0a650401134f07e40216f0d0d1a66a32.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a6d56a66a2652ac6de68f8bd544a175.asciidoc b/docs/doc_examples/0a6d56a66a2652ac6de68f8bd544a175.asciidoc new file mode 100644 index 000000000..994e1c6be --- /dev/null +++ b/docs/doc_examples/0a6d56a66a2652ac6de68f8bd544a175.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index1", + query: { + query_string: { + query: "running with scissors", + fields: ["comment", "comment.english"], + }, + }, + highlight: { + order: "score", + fields: { + comment: { + matched_fields: ["comment.english"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a701bdc7b6786026f40c0be8ebfc753.asciidoc b/docs/doc_examples/0a701bdc7b6786026f40c0be8ebfc753.asciidoc new file mode 100644 index 000000000..187671a16 --- /dev/null +++ b/docs/doc_examples/0a701bdc7b6786026f40c0be8ebfc753.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + query: { + bool: { + filter: { + term: { + currency: "EUR", + }, + }, + }, + }, + }, + latest: { + unique_key: ["geoip.country_iso_code", "geoip.region_name"], + sort: "order_date", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a758d9dec74d9e942cf41a06499234f.asciidoc b/docs/doc_examples/0a758d9dec74d9e942cf41a06499234f.asciidoc new file mode 100644 index 000000000..c33a0b1dd --- /dev/null +++ b/docs/doc_examples/0a758d9dec74d9e942cf41a06499234f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + counter: 1, + tags: ["red"], + }, 
+}); +console.log(response); +---- diff --git a/docs/doc_examples/0a84c5b7c0793be745b13eaf13e94422.asciidoc b/docs/doc_examples/0a84c5b7c0793be745b13eaf13e94422.asciidoc new file mode 100644 index 000000000..cf13f17c8 --- /dev/null +++ b/docs/doc_examples/0a84c5b7c0793be745b13eaf13e94422.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + "routing.allocation.total_shards_per_node": "2", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a9173f3b22716c78653976dc4799eae.asciidoc b/docs/doc_examples/0a9173f3b22716c78653976dc4799eae.asciidoc new file mode 100644 index 000000000..dd6c9eac3 --- /dev/null +++ b/docs/doc_examples/0a9173f3b22716c78653976dc4799eae.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + product: { + terms: { + field: "product", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0a958e486ede3f519d48431ab689eded.asciidoc b/docs/doc_examples/0a958e486ede3f519d48431ab689eded.asciidoc deleted file mode 100644 index 9945b33ee..000000000 --- a/docs/doc_examples/0a958e486ede3f519d48431ab689eded.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: { - source: 'ctx._source.counter += params.count', - lang: 'painless', - params: { - count: 4 - } - }, - upsert: { - counter: 1 - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0ac295efdabd59e7b1f1a4577535d942.asciidoc b/docs/doc_examples/0ac295efdabd59e7b1f1a4577535d942.asciidoc new file mode 100644 index 000000000..cbc8b66d8 --- /dev/null +++ b/docs/doc_examples/0ac295efdabd59e7b1f1a4577535d942.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence\n [ process where process.name == "regsvr32.exe" ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ac9916f47a2483b89c1416684af322a.asciidoc b/docs/doc_examples/0ac9916f47a2483b89c1416684af322a.asciidoc deleted file mode 100644 index f10420a38..000000000 --- a/docs/doc_examples/0ac9916f47a2483b89c1416684af322a.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'to be or not to be', - operator: 'and', - zero_terms_query: 'all' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0ac9e7dd7e4acba51888256326ed5ffe.asciidoc b/docs/doc_examples/0ac9e7dd7e4acba51888256326ed5ffe.asciidoc new file mode 100644 index 000000000..e6c8eca1b --- /dev/null +++ 
b/docs/doc_examples/0ac9e7dd7e4acba51888256326ed5ffe.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + track_total_hits: true, + query: { + match: { + "user.id": "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ad86b582aff1235f37ccb2cc90adad5.asciidoc b/docs/doc_examples/0ad86b582aff1235f37ccb2cc90adad5.asciidoc new file mode 100644 index 000000000..2d94108dd --- /dev/null +++ b/docs/doc_examples/0ad86b582aff1235f37ccb2cc90adad5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.open({ + index: ".ds-my-data-stream-2099.03.07-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc b/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc new file mode 100644 index 000000000..5c948b3d2 --- /dev/null +++ b/docs/doc_examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "amazon_bedrock_embeddings", + inference_config: { + service: "amazonbedrock", + service_settings: { + access_key: "<aws-access-key>", + secret_key: "<aws-secret-key>", + region: "us-east-1", + provider: "amazontitan", + model: "amazon.titan-embed-text-v2:0", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0adbce828234ca221e3d03b184296407.asciidoc b/docs/doc_examples/0adbce828234ca221e3d03b184296407.asciidoc new file mode 100644 index 000000000..65d80a5f0 --- /dev/null +++ b/docs/doc_examples/0adbce828234ca221e3d03b184296407.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index", + runtime: { + "http.clientip": { + type: "ip", + script: + "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip); \n ", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc b/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc new file mode 100644 index 000000000..2c93643b2 --- /dev/null +++ b/docs/doc_examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateFiltering({ + connector_id: "my-g-drive-connector", + rules: [ + { + field: "file_extension", + id: "exclude-txt-files", + order: 0, + policy: "exclude", + rule: "equals", + value: "txt", + }, + { + field: "_", + id: "DEFAULT", + order: 1, + policy: "include", + rule: "regex", + value: ".*", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0afaf1cad692e6201aa574c8feb6e622.asciidoc b/docs/doc_examples/0afaf1cad692e6201aa574c8feb6e622.asciidoc deleted file mode 100644 index ba2ee8cb4..000000000 ---
a/docs/doc_examples/0afaf1cad692e6201aa574c8feb6e622.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - tags: { - terms: { - field: 'tags', - include: '.*sport.*', - exclude: 'water_.*' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0aff04881be21eea45375ec4f4f50e66.asciidoc b/docs/doc_examples/0aff04881be21eea45375ec4f4f50e66.asciidoc new file mode 100644 index 000000000..3aa94fd9f --- /dev/null +++ b/docs/doc_examples/0aff04881be21eea45375ec4f4f50e66.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b1c5486f96bfa5db8db854c0178dbe5.asciidoc b/docs/doc_examples/0b1c5486f96bfa5db8db854c0178dbe5.asciidoc new file mode 100644 index 000000000..ce0112e17 --- /dev/null +++ b/docs/doc_examples/0b1c5486f96bfa5db8db854c0178dbe5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_one: { + seeds: ["127.0.0.1:{remote-interface-default-port}"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b47b0bef81b9b5eecfb3775695bd6ad.asciidoc b/docs/doc_examples/0b47b0bef81b9b5eecfb3775695bd6ad.asciidoc new file mode 100644 index 000000000..ac32302d7 --- /dev/null +++ b/docs/doc_examples/0b47b0bef81b9b5eecfb3775695bd6ad.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "remote_monitor", + password: "changeme", + roles: ["remote_monitoring_agent"], + full_name: "Internal Agent For Remote Monitoring", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b4e50f1b5a0537cbb1a41276bb51c54.asciidoc b/docs/doc_examples/0b4e50f1b5a0537cbb1a41276bb51c54.asciidoc new file mode 100644 index 000000000..6f9ec7de2 --- /dev/null +++ b/docs/doc_examples/0b4e50f1b5a0537cbb1a41276bb51c54.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + day_of_week: { + type: "keyword", + script: { + source: + "emit(doc['@timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ENGLISH))", + }, + }, + }, + aggs: { + day_of_week: { + terms: { + field: "day_of_week", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc b/docs/doc_examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc new file mode 100644 index 000000000..ccd775ed1 --- /dev/null +++ b/docs/doc_examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: 
"my-index-000001", + query: { + script_score: { + query: { + match: { + message: "some message", + }, + }, + script: { + id: "calculate-score", + params: { + my_modifier: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0b913fb9e010d877c0be015519cfddc6.asciidoc b/docs/doc_examples/0b913fb9e010d877c0be015519cfddc6.asciidoc new file mode 100644 index 000000000..9717c0327 --- /dev/null +++ b/docs/doc_examples/0b913fb9e010d877c0be015519cfddc6.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + document: { + "@timestamp": "2019-05-18T15:57:27.541Z", + ip: "225.44.217.191", + extension: "jpg", + response: "200", + geo: { + coordinates: { + lat: 38.53146222, + lon: -121.7864906, + }, + }, + url: "https://media-for-the-masses.theacademyofperformingartsandscience.org/uploads/charles-fullerton.jpg", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000002", + document: { + "@timestamp": "2019-05-20T03:44:20.844Z", + ip: "198.247.165.49", + extension: "php", + response: "200", + geo: { + coordinates: { + lat: 37.13189556, + lon: -76.4929875, + }, + }, + memory: 241720, + url: "https://theacademyofperformingartsandscience.org/people/type:astronauts/name:laurel-b-clark/profile", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0b987b4101e016653a32d7b092d47e4c.asciidoc b/docs/doc_examples/0b987b4101e016653a32d7b092d47e4c.asciidoc new file mode 100644 index 000000000..a2a3e2f36 --- /dev/null +++ b/docs/doc_examples/0b987b4101e016653a32d7b092d47e4c.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + region: { + type: "keyword", + }, + manager: { + properties: { + age: { + type: "integer", + }, + name: { + properties: { + first: { + type: "text", + }, + last: { + type: "text", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ba0b2db24852abccb7c0fc1098d566e.asciidoc b/docs/doc_examples/0ba0b2db24852abccb7c0fc1098d566e.asciidoc deleted file mode 100644 index 7e23f1bce..000000000 --- a/docs/doc_examples/0ba0b2db24852abccb7c0fc1098d566e.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '2', - routing: 'user1', - body: { - counter: 1, - tags: [ - 'white' - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0bbd30b9be3e54ff3028b9f4459634d2.asciidoc b/docs/doc_examples/0bbd30b9be3e54ff3028b9f4459634d2.asciidoc deleted file mode 100644 index f531241da..000000000 --- a/docs/doc_examples/0bbd30b9be3e54ff3028b9f4459634d2.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'my_index', - body: { - properties: { - name: { - properties: { - last: { - type: 'text' - } - } - } - } - } -}) -console.log(response) ----- - diff --git 
a/docs/doc_examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc b/docs/doc_examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc new file mode 100644 index 000000000..01200be80 --- /dev/null +++ b/docs/doc_examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example_nested", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + nested: { + path: "nested_field", + inner_hits: { + name: "nested_vector", + _source: false, + fields: ["nested_field.paragraph_id"], + }, + query: { + knn: { + field: "nested_field.nested_vector", + query_vector: [1, 0, 0.5], + k: 10, + }, + }, + }, + }, + }, + }, + { + standard: { + query: { + term: { + topic: "ai", + }, + }, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: ["topic"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc b/docs/doc_examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc new file mode 100644 index 000000000..395132389 --- /dev/null +++ b/docs/doc_examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + price: { + unmapped_type: "long", + }, + }, + ], + query: { + term: { + product: "chocolate", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0bd3923424a20a4ba860b0774b9991b1.asciidoc b/docs/doc_examples/0bd3923424a20a4ba860b0774b9991b1.asciidoc deleted file mode 100644 index 55e11a522..000000000 --- a/docs/doc_examples/0bd3923424a20a4ba860b0774b9991b1.asciidoc +++ /dev/null @@ -1,39 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'drivers', - body: { - query: { - nested: { - path: 'driver', - query: { - nested: { - path: 'driver.vehicle', - query: { - bool: { - must: [ - { - match: { - 'driver.vehicle.make': 'Powell Motors' - } - }, - { - match: { - 'driver.vehicle.model': 'Canyonero' - } - } - ] - } - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0be2c28ee65384774b1e479b47dc3d92.asciidoc b/docs/doc_examples/0be2c28ee65384774b1e479b47dc3d92.asciidoc deleted file mode 100644 index 4a8775f7e..000000000 --- a/docs/doc_examples/0be2c28ee65384774b1e479b47dc3d92.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putSettings({ - index: 'twitter', - body: { - index: { - refresh_interval: '1s' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc new file mode 100644 index 000000000..486847ab6 --- /dev/null +++ b/docs/doc_examples/0bee07a581c5776e068f6f4efad5a399.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQuery({ + format: "json", + query: + "\n FROM 
my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + include_ccs_metadata: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c05c66cfe3a2169b1ec1aba77e26db2.asciidoc b/docs/doc_examples/0c05c66cfe3a2169b1ec1aba77e26db2.asciidoc new file mode 100644 index 000000000..a53e1145d --- /dev/null +++ b/docs/doc_examples/0c05c66cfe3a2169b1ec1aba77e26db2.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + saturation: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc b/docs/doc_examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc new file mode 100644 index 000000000..4b95c61f7 --- /dev/null +++ b/docs/doc_examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + "http.client_ip": { + type: "ip", + script: + "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip); \n ", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc b/docs/doc_examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc new file mode 100644 index 000000000..9d967d750 --- /dev/null +++ b/docs/doc_examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.recovery({ + v: "true", + h: "i,s,t,ty,st,shost,thost,f,fp,b,bp", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c464965126cc09e6812716a145991d4.asciidoc b/docs/doc_examples/0c464965126cc09e6812716a145991d4.asciidoc new file mode 100644 index 000000000..179e814a0 --- /dev/null +++ b/docs/doc_examples/0c464965126cc09e6812716a145991d4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info({ + node_id: "ingest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c4ad860a485fe53d8140ad3ccd11dcf.asciidoc b/docs/doc_examples/0c4ad860a485fe53d8140ad3ccd11dcf.asciidoc deleted file mode 100644 index 441a025db..000000000 --- a/docs/doc_examples/0c4ad860a485fe53d8140ad3ccd11dcf.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - terms: { - user: [ - 'kimchy', - 'elasticsearch' - ], - boost: 1 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0c52af573c9401a2a687e86a4beb182b.asciidoc b/docs/doc_examples/0c52af573c9401a2a687e86a4beb182b.asciidoc new file mode 100644 index 000000000..e0413d7dd --- /dev/null +++ b/docs/doc_examples/0c52af573c9401a2a687e86a4beb182b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT 
EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "cbor-attachment", + description: "Extract attachment information", + processors: [ + { + attachment: { + field: "data", + remove_binary: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c688eecf4ebdffdbe1deae0983c3ed8.asciidoc b/docs/doc_examples/0c688eecf4ebdffdbe1deae0983c3ed8.asciidoc new file mode 100644 index 000000000..5ee262246 --- /dev/null +++ b/docs/doc_examples/0c688eecf4ebdffdbe1deae0983c3ed8.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "user_hits", + size: 0, + aggs: { + users_per_day: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + distinct_users: { + cardinality: { + field: "user_id", + }, + }, + total_new_users: { + cumulative_cardinality: { + buckets_path: "distinct_users", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc b/docs/doc_examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc new file mode 100644 index 000000000..a22aa8928 --- /dev/null +++ b/docs/doc_examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateToken({ + refresh_token: "vLBPvmAB6KvwvJZr27cS", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc b/docs/doc_examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc new file mode 100644 index 000000000..1ade11a6c --- /dev/null +++ b/docs/doc_examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping4", + roles: ["superuser"], + enabled: true, + rules: { + any: [ + { + field: { + username: "esadmin", + }, + }, + { + field: { + groups: ["cn=admins,dc=example,dc=com", "cn=other,dc=example,dc=com"], + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c7c40cd17985c3dd32aeaadbafc4fce.asciidoc b/docs/doc_examples/0c7c40cd17985c3dd32aeaadbafc4fce.asciidoc new file mode 100644 index 000000000..c89f06982 --- /dev/null +++ b/docs/doc_examples/0c7c40cd17985c3dd32aeaadbafc4fce.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + message: "{{^name_exists}}Hello World{{/name_exists}}", + }, + }, + }, + params: { + name_exists: false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc b/docs/doc_examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc new file mode 100644 index 000000000..4b46d7ad7 --- /dev/null +++ b/docs/doc_examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.delete({ + index: "my-index-000001", + id: 1, + routing: "shard-1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc b/docs/doc_examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc new file mode 100644 index 000000000..54759101e --- /dev/null +++ b/docs/doc_examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + match: { + title: { + query: "fluffy pancakes breakfast", + minimum_should_match: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ca6aae1ab2f0be6127beea8a245374e.asciidoc b/docs/doc_examples/0ca6aae1ab2f0be6127beea8a245374e.asciidoc new file mode 100644 index 000000000..a2c6d6af8 --- /dev/null +++ b/docs/doc_examples/0ca6aae1ab2f0be6127beea8a245374e.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "my-index-000001,cluster*:my-index-000001,-cluster_three:*", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0cc991e3f7f8511a34730e154b3c5edc.asciidoc b/docs/doc_examples/0cc991e3f7f8511a34730e154b3c5edc.asciidoc deleted file mode 100644 index b76b0655e..000000000 --- a/docs/doc_examples/0cc991e3f7f8511a34730e154b3c5edc.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - index: 'twitter' - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0ce3606f1dba490eef83c4317b315b62.asciidoc b/docs/doc_examples/0ce3606f1dba490eef83c4317b315b62.asciidoc deleted file mode 100644 index c89a1ffb3..000000000 --- a/docs/doc_examples/0ce3606f1dba490eef83c4317b315b62.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'twitter', - body: { - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/0cee58617e75f493c5049d77be1c49f3.asciidoc b/docs/doc_examples/0cee58617e75f493c5049d77be1c49f3.asciidoc new file mode 100644 index 000000000..8d23a9a95 --- /dev/null +++ b/docs/doc_examples/0cee58617e75f493c5049d77be1c49f3.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + fuzzy: { + "user.id": { + value: "ki", + fuzziness: "AUTO", + max_expansions: 50, + prefix_length: 0, + transpositions: true, + rewrite: "constant_score_blended", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0cf29da4b9f0503bd1a79bdc883aadbc.asciidoc b/docs/doc_examples/0cf29da4b9f0503bd1a79bdc883aadbc.asciidoc new file mode 100644 index 000000000..5f8112567 
--- /dev/null +++ b/docs/doc_examples/0cf29da4b9f0503bd1a79bdc883aadbc.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + runtime_mappings: { + "grade.corrected": { + type: "double", + script: { + source: "emit(Math.min(100, doc['grade'].value * params.correction))", + params: { + correction: 1.2, + }, + }, + }, + }, + aggs: { + avg_corrected_grade: { + avg: { + field: "grade.corrected", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d0f7ece06f21e624d21b09804732f61.asciidoc b/docs/doc_examples/0d0f7ece06f21e624d21b09804732f61.asciidoc new file mode 100644 index 000000000..700c152f7 --- /dev/null +++ b/docs/doc_examples/0d0f7ece06f21e624d21b09804732f61.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grade_avg: { + avg: { + field: "grade", + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc b/docs/doc_examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc new file mode 100644 index 000000000..fb9189c62 --- /dev/null +++ b/docs/doc_examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.put({ + connector_id: "my-connector", + index_name: "search-google-drive", + name: "My Connector", + description: "My Connector to sync data to Elastic index from Google Drive", + service_type: "google_drive", + language: "en", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d49474511b236bc89e768c8ee91adf1.asciidoc b/docs/doc_examples/0d49474511b236bc89e768c8ee91adf1.asciidoc new file mode 100644 index 000000000..1cfc2e11d --- /dev/null +++ b/docs/doc_examples/0d49474511b236bc89e768c8ee91adf1.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + query: '"fried eggs" +(eggplant | potato) -frittata', + fields: ["title^5", "body"], + default_operator: "and", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d54ddad2bf6f76aa5c35f53ba77748a.asciidoc b/docs/doc_examples/0d54ddad2bf6f76aa5c35f53ba77748a.asciidoc new file mode 100644 index 000000000..ef4ef2bff --- /dev/null +++ b/docs/doc_examples/0d54ddad2bf6f76aa5c35f53ba77748a.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["porter_stem"], + text: "the foxes jumping quickly", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d59af9dc556dc526b9394051efa800a.asciidoc b/docs/doc_examples/0d59af9dc556dc526b9394051efa800a.asciidoc new file mode 100644 index 000000000..19e14ecd5 --- /dev/null +++ b/docs/doc_examples/0d59af9dc556dc526b9394051efa800a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT 
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "logs-foo-bar", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc b/docs/doc_examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc new file mode 100644 index 000000000..c95b502ca --- /dev/null +++ b/docs/doc_examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + term: { + topic: "elastic", + }, + }, + }, + }, + { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: + "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: false, + size: 1, + explain: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc b/docs/doc_examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc new file mode 100644 index 000000000..47243afad --- /dev/null +++ b/docs/doc_examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["foo", "bar"], + template: { + settings: { + number_of_shards: 3, + }, + }, + _meta: { + description: "set number of shards to three", + serialization: { + class: "MyIndexTemplate", + id: 17, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc b/docs/doc_examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc new file mode 100644 index 000000000..85243323d --- /dev/null +++ b/docs/doc_examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping8", + roles: ["superuser"], + enabled: true, + rules: { + all: [ + { + any: [ + { + field: { + dn: "*,ou=admin,dc=example,dc=com", + }, + }, + { + field: { + username: ["es-admin", "es-system"], + }, + }, + ], + }, + { + field: { + groups: "cn=people,dc=example,dc=com", + }, + }, + { + except: { + field: { + "metadata.terminated_date": null, + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0da477cb8a7883539ce3ae7ac1e9c5cb.asciidoc b/docs/doc_examples/0da477cb8a7883539ce3ae7ac1e9c5cb.asciidoc new file mode 100644 index 000000000..f0fba00e2 --- /dev/null +++ b/docs/doc_examples/0da477cb8a7883539ce3ae7ac1e9c5cb.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + 
min_doc_count: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc b/docs/doc_examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc new file mode 100644 index 000000000..841b610e1 --- /dev/null +++ b/docs/doc_examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_s3_repository", + repository: { + type: "s3", + settings: { + bucket: "my-bucket", + client: "my-alternate-client", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0db06c3cba57cf442ac7fab89966e1e1.asciidoc b/docs/doc_examples/0db06c3cba57cf442ac7fab89966e1e1.asciidoc new file mode 100644 index 000000000..5530f7f7c --- /dev/null +++ b/docs/doc_examples/0db06c3cba57cf442ac7fab89966e1e1.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + my_id: "1", + text: "This is a question", + my_join_field: "question", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + my_id: "2", + text: "This is another question", + my_join_field: "question", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc b/docs/doc_examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc new file mode 100644 index 000000000..0161abb42 --- /dev/null +++ b/docs/doc_examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "id,ip,port,v,m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc b/docs/doc_examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc new file mode 100644 index 000000000..ba6d119b8 --- /dev/null +++ b/docs/doc_examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getPrivileges(); +console.log(response); +---- diff --git a/docs/doc_examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc b/docs/doc_examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc new file mode 100644 index 000000000..2724e3a6e --- /dev/null +++ b/docs/doc_examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.componentTemplates({ + name: "my-template-*", + v: "true", + s: "name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc new file mode 100644 index 000000000..5627427c4 --- /dev/null +++ b/docs/doc_examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.renderQuery({ +  name: "my-app", +  params: { +    query_string: "my first query", +    text_fields: [ +      { +        name: "title", +        boost: 5, +      }, +      { +        name: "description", +        boost: 1, +      }, +    ], +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e0d8f652d7d29371b5ea7c7544385eb.asciidoc b/docs/doc_examples/0e0d8f652d7d29371b5ea7c7544385eb.asciidoc new file mode 100644 index 000000000..f81e78aaf --- /dev/null +++ b/docs/doc_examples/0e0d8f652d7d29371b5ea7c7544385eb.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ +  index: "amazon-bedrock-embeddings", +  knn: { +    field: "content_embedding", +    query_vector_builder: { +      text_embedding: { +        model_id: "amazon_bedrock_embeddings", +        model_text: "Calculate fuel cost", +      }, +    }, +    k: 10, +    num_candidates: 100, +  }, +  _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e118857b815b62118a30c042f079db1.asciidoc b/docs/doc_examples/0e118857b815b62118a30c042f079db1.asciidoc index 45acb2f61..fbc3b97e9 100644 --- a/docs/doc_examples/0e118857b815b62118a30c042f079db1.asciidoc +++ b/docs/doc_examples/0e118857b815b62118a30c042f079db1.asciidoc @@ -4,19 +4,13 @@ [source, js] ---- const response = await client.search({ -  body: { -    query: { -      multi_match: { -        query: 'quick brown f', -        type: 'phrase_prefix', -        fields: [ -          'subject', -          'message' -        ] -      } -    } -  } -}) -console.log(response) +  query: { +    multi_match: { +      query: "quick brown f", +      type: "phrase_prefix", +      fields: ["subject", "message"], +    }, +  }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/0e31b8ad176b31028becf9500989bcbd.asciidoc b/docs/doc_examples/0e31b8ad176b31028becf9500989bcbd.asciidoc new file mode 100644 index 000000000..53fcff58d --- /dev/null +++ b/docs/doc_examples/0e31b8ad176b31028becf9500989bcbd.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ +  task_type: "text_embedding", +  inference_id: "watsonx-embeddings", +  inference_config: { +    service: "watsonxai", +    service_settings: { +      api_key: "<api_key>", +      url: "<url>", +      model_id: "ibm/slate-30m-english-rtrvr", +      project_id: "<project_id>", +      api_version: "2024-03-14", +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e3b4a48a3450cd99c95ec46d4701b58.asciidoc b/docs/doc_examples/0e3b4a48a3450cd99c95ec46d4701b58.asciidoc new file mode 100644 index 000000000..71e7fd206 --- /dev/null +++ b/docs/doc_examples/0e3b4a48a3450cd99c95ec46d4701b58.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ +  index: "sales", +  size: 0, +  filter_path: "aggregations", +  aggs: { +    hats: { +      filter: { +        term: { +          type: "hat", +        }, +      }, +      aggs: { +        avg_price: { +          avg: { +            field: "price", +          }, +        }, +      }, +    }, +    t_shirts: { +      filter: { +        term: { +          type: "t-shirt", +        }, +      }, +      aggs: { +        avg_price: { +          avg: { +            field: "price", +          }, +        }, +      }, +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e5d25c7bb738c42d471020d678e2966.asciidoc
b/docs/doc_examples/0e5d25c7bb738c42d471020d678e2966.asciidoc new file mode 100644 index 000000000..c75941ffe --- /dev/null +++ b/docs/doc_examples/0e5d25c7bb738c42d471020d678e2966.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startTrainedModelDeployment({ + model_id: "my_model", + deployment_id: "my_model_for_ingest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc b/docs/doc_examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc new file mode 100644 index 000000000..064744d5c --- /dev/null +++ b/docs/doc_examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + fields: ["content"], + query: "foo bar -baz", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0e84bb54b8a9a5387f252eeffeb1098e.asciidoc b/docs/doc_examples/0e84bb54b8a9a5387f252eeffeb1098e.asciidoc new file mode 100644 index 000000000..5570248b6 --- /dev/null +++ b/docs/doc_examples/0e84bb54b8a9a5387f252eeffeb1098e.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_error_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + search: { + request: { + indices: ["logs"], + body: { + query: { + match: { + message: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ea146b178561bc8b9002bed8a35641f.asciidoc b/docs/doc_examples/0ea146b178561bc8b9002bed8a35641f.asciidoc new file mode 100644 index 000000000..a5321db01 --- /dev/null +++ b/docs/doc_examples/0ea146b178561bc8b9002bed8a35641f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.getAutoscalingPolicy({ + name: "my_autoscaling_policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc b/docs/doc_examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc new file mode 100644 index 000000000..c55280692 --- /dev/null +++ b/docs/doc_examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + slice: { + id: 0, + max: 2, + }, + query: { + match: { + message: "foo", + }, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + }, +}); +console.log(response); + +const response1 = await client.search({ + slice: { + id: 1, + max: 2, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + }, + query: { + match: { + 
message: "foo", +    }, +  }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc b/docs/doc_examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc new file mode 100644 index 000000000..542a15de3 --- /dev/null +++ b/docs/doc_examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.migrateToDataTiers({ +  legacy_template_to_delete: "global-template", +  node_attribute: "custom_attribute_name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0eb2c1284a9829224913a860190580d8.asciidoc b/docs/doc_examples/0eb2c1284a9829224913a860190580d8.asciidoc new file mode 100644 index 000000000..6406bb3d7 --- /dev/null +++ b/docs/doc_examples/0eb2c1284a9829224913a860190580d8.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ +  index: "fingerprint_example", +  settings: { +    analysis: { +      analyzer: { +        whitespace_fingerprint: { +          tokenizer: "whitespace", +          filter: ["fingerprint"], +        }, +      }, +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0ec2178fb0103862b47cc20bc5885972.asciidoc b/docs/doc_examples/0ec2178fb0103862b47cc20bc5885972.asciidoc new file mode 100644 index 000000000..0bcba66a8 --- /dev/null +++ b/docs/doc_examples/0ec2178fb0103862b47cc20bc5885972.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ +  name: "my_fs_backup", +  repository: { +    type: "fs", +    settings: { +      location: "my_fs_backup_location", +      readonly: true, +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0eccea755bd4f6dd47579a9022690546.asciidoc b/docs/doc_examples/0eccea755bd4f6dd47579a9022690546.asciidoc new file mode 100644 index 000000000..bbe82e2f4 --- /dev/null +++ b/docs/doc_examples/0eccea755bd4f6dd47579a9022690546.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ +  persistent: { +    cluster: { +      remote: { +        my_remote: { +          mode: "proxy", +          proxy_address: "my.remote.cluster.com:9443", +        }, +      }, +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc b/docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc new file mode 100644 index 000000000..007f558d8 --- /dev/null +++ b/docs/doc_examples/0f028f71f04c1d569fab402869565a84.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ +  index: ".reindexed-v9-ml-anomalies-custom-example", +  settings: { +    index: { +      number_of_replicas: "<original_number_of_replicas>", +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc b/docs/doc_examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc new file mode 100644 index 000000000..f40987611 --- /dev/null +++ b/docs/doc_examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc @@
-0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": [ + [-70, 40], + [-71, 42], + ], + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f3a78296825d507dda6771f7ceb9d61.asciidoc b/docs/doc_examples/0f3a78296825d507dda6771f7ceb9d61.asciidoc new file mode 100644 index 000000000..89d44803c --- /dev/null +++ b/docs/doc_examples/0f3a78296825d507dda6771f7ceb9d61.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.exclude._ip": "10.0.0.1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc b/docs/doc_examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc new file mode 100644 index 000000000..87f3c4188 --- /dev/null +++ b/docs/doc_examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + recall: { + k: 20, + relevant_rating_threshold: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f547926ebf092e19fc5fb433e9ac8c1.asciidoc b/docs/doc_examples/0f547926ebf092e19fc5fb433e9ac8c1.asciidoc new file mode 100644 index 000000000..7cfd78e19 --- /dev/null +++ b/docs/doc_examples/0f547926ebf092e19fc5fb433e9ac8c1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "whitespace", + filter: ["lowercase", "porter_stem"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc b/docs/doc_examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc new file mode 100644 index 000000000..4d208c97c --- /dev/null +++ b/docs/doc_examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.simulateTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc b/docs/doc_examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc new file mode 100644 index 000000000..4b29c0392 --- /dev/null +++ b/docs/doc_examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "_all", + index: "foo,bar", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/0fb472645116d58ddef89ca976d15a01.asciidoc b/docs/doc_examples/0fb472645116d58ddef89ca976d15a01.asciidoc new file mode 100644 index 000000000..921548536 --- /dev/null +++ b/docs/doc_examples/0fb472645116d58ddef89ca976d15a01.asciidoc @@ -0,0 +1,73 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + { + "@timestamp": 1516729294000, + model_number: "QVKC92Q", + measures: { + voltage: 5.2, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516642894000, + model_number: "QVKC92Q", + measures: { + voltage: 5.8, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516556494000, + model_number: "QVKC92Q", + measures: { + voltage: 5.1, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516470094000, + model_number: "QVKC92Q", + measures: { + voltage: 5.6, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516383694000, + model_number: "HG537PU", + measures: { + voltage: 4.2, + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516297294000, + model_number: "HG537PU", + measures: { + voltage: 4, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc b/docs/doc_examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc new file mode 100644 index 000000000..77565b1fd --- /dev/null +++ b/docs/doc_examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ledger", + size: 0, + aggs: { + profit: { + scripted_metric: { + init_script: { + id: "my_init_script", + }, + map_script: { + id: "my_map_script", + }, + combine_script: { + id: "my_combine_script", + }, + params: { + field: "amount", + }, + reduce_script: { + id: "my_reduce_script", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0fbca60a487f5f22a4d51d73b2434cc4.asciidoc b/docs/doc_examples/0fbca60a487f5f22a4d51d73b2434cc4.asciidoc new file mode 100644 index 000000000..fadf2814c --- /dev/null +++ b/docs/doc_examples/0fbca60a487f5f22a4d51d73b2434cc4.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "elser-embeddings", + mappings: { + properties: { + content_embedding: { + type: "sparse_vector", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0fc4b589df5388da784c6d981e769e31.asciidoc b/docs/doc_examples/0fc4b589df5388da784c6d981e769e31.asciidoc new file mode 100644 index 000000000..a052f882c --- /dev/null +++ b/docs/doc_examples/0fc4b589df5388da784c6d981e769e31.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putTemplate({ + name: "template_1", + index_patterns: ["te*"], + settings: { + number_of_shards: 1, + }, + aliases: { + alias1: {}, + alias2: { + filter: { + term: { + "user.id": "kimchy", + }, + }, + routing: "shard-1", + }, + "{index}-alias": {}, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/0fd08e14ad651827be53897a6bdaf0b8.asciidoc b/docs/doc_examples/0fd08e14ad651827be53897a6bdaf0b8.asciidoc new file mode 100644 index 000000000..ba1afbe36 --- /dev/null +++ b/docs/doc_examples/0fd08e14ad651827be53897a6bdaf0b8.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ +  query: { +    match_bool_prefix: { +      message: "quick brown f", +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc b/docs/doc_examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc new file mode 100644 index 000000000..4b426ac41 --- /dev/null +++ b/docs/doc_examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.scheduleNowTransform({ +  transform_id: "ecommerce_transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/100d4e33158069f3caa32e8bfa0eb3d0.asciidoc b/docs/doc_examples/100d4e33158069f3caa32e8bfa0eb3d0.asciidoc new file mode 100644 index 000000000..0aa38abad --- /dev/null +++ b/docs/doc_examples/100d4e33158069f3caa32e8bfa0eb3d0.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ +  index: "my-index-000001", +  mappings: { +    dynamic: "runtime", +    properties: { +      "@timestamp": { +        type: "date", +      }, +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/102c7de25d13c87cf28839ada9f63c95.asciidoc b/docs/doc_examples/102c7de25d13c87cf28839ada9f63c95.asciidoc new file mode 100644 index 000000000..dd86be71b --- /dev/null +++ b/docs/doc_examples/102c7de25d13c87cf28839ada9f63c95.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ +  index: "index", +  id: 1, +  document: { +    my_date: "2016-05-11T16:30:55.328Z", +  }, +}); +console.log(response); + +const response1 = await client.search({ +  index: "index", +  query: { +    constant_score: { +      filter: { +        range: { +          my_date: { +            gte: "now-1h", +            lte: "now", +          }, +        }, +      }, +    }, +  }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/103296e16b4233926ad1f07360385606.asciidoc b/docs/doc_examples/103296e16b4233926ad1f07360385606.asciidoc new file mode 100644 index 000000000..06f7bd53e --- /dev/null +++ b/docs/doc_examples/103296e16b4233926ad1f07360385606.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ +  index: "turkish_example", +  settings: { +    analysis: { +      filter: { +        turkish_stop: { +          type: "stop", +          stopwords: "_turkish_", +        }, +        turkish_lowercase: { +          type: "lowercase", +          language: "turkish", +        }, +        turkish_keywords: { +          type: "keyword_marker", +          keywords: ["örnek"], +        }, +        turkish_stemmer: { +          type: "stemmer", +          language: "turkish", +        }, +      }, +      analyzer: { +        rebuilt_turkish: { +          tokenizer: "standard", +          filter: [ +            "apostrophe", +            "turkish_lowercase", +            "turkish_stop", +            "turkish_keywords",
"turkish_stemmer", +          ], +        }, +      }, +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1070e59ba144cdf309fd9b2591612b95.asciidoc b/docs/doc_examples/1070e59ba144cdf309fd9b2591612b95.asciidoc new file mode 100644 index 000000000..a2b53a448 --- /dev/null +++ b/docs/doc_examples/1070e59ba144cdf309fd9b2591612b95.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ +  index: "test", +  id: 3, +  document: { +    test: "test", +  }, +}); +console.log(response); + +const response1 = await client.index({ +  index: "test", +  id: 4, +  refresh: "false", +  document: { +    test: "test", +  }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc b/docs/doc_examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc new file mode 100644 index 000000000..7377cbb91 --- /dev/null +++ b/docs/doc_examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ +  method: "PUT", +  path: "/_internal/desired_nodes/Ywkh3INLQcuPT49f6kcppA/100", +  body: { +    nodes: [ +      { +        settings: { +          "node.name": "instance-000187", +          "node.external_id": "instance-000187", +          "node.roles": ["data_hot", "master"], +          "node.attr.data": "hot", +          "node.attr.logical_availability_zone": "zone-0", +        }, +        processors: 8, +        memory: "58gb", +        storage: "2tb", +      }, +    ], +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc b/docs/doc_examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc new file mode 100644 index 000000000..a77dfed0d --- /dev/null +++ b/docs/doc_examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.resumeFollow({ +  index: "<follower_index>", +}); +console.log(response); +---- diff --git a/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc b/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc new file mode 100644 index 000000000..2520b36c1 --- /dev/null +++ b/docs/doc_examples/10a16abe990288253ea25a1b1712fe3d.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryUser({ +  with_profile_uid: "true", +  query: { +    prefix: { +      roles: "other", +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc b/docs/doc_examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc new file mode 100644 index 000000000..4cbd44f94 --- /dev/null +++ b/docs/doc_examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ +  ignore_condition: true, +  watch: { +    trigger: { +      schedule: { +        interval: "10s", +      }, +    }, +    input: { +      search: { +        request: { +          indices: ["logs"], +          body: { +            query: { +              match: { +                message: "error", +              }, +            }, +          }, +        }, +      }, +    }, +    condition: { +      compare: {
"ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + log_error: { + logging: { + text: "Found {{ctx.payload.hits.total}} errors in the logs", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10d8b17e73d31dcd907de67327ed78a2.asciidoc b/docs/doc_examples/10d8b17e73d31dcd907de67327ed78a2.asciidoc new file mode 100644 index 000000000..3cae02031 --- /dev/null +++ b/docs/doc_examples/10d8b17e73d31dcd907de67327ed78a2.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "dutch_example", + settings: { + analysis: { + filter: { + dutch_stop: { + type: "stop", + stopwords: "_dutch_", + }, + dutch_keywords: { + type: "keyword_marker", + keywords: ["voorbeeld"], + }, + dutch_stemmer: { + type: "stemmer", + language: "dutch", + }, + dutch_override: { + type: "stemmer_override", + rules: [ + "fiets=>fiets", + "bromfiets=>bromfiets", + "ei=>eier", + "kind=>kinder", + ], + }, + }, + analyzer: { + rebuilt_dutch: { + tokenizer: "standard", + filter: [ + "lowercase", + "dutch_stop", + "dutch_keywords", + "dutch_override", + "dutch_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc b/docs/doc_examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc new file mode 100644 index 000000000..ec93e4938 --- /dev/null +++ b/docs/doc_examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + realm_name: "native1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/10de9fd4a38755020a07c4ec964d44c9.asciidoc b/docs/doc_examples/10de9fd4a38755020a07c4ec964d44c9.asciidoc new file mode 100644 index 000000000..1803166ae --- /dev/null +++ b/docs/doc_examples/10de9fd4a38755020a07c4ec964d44c9.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "oidc-example", + roles: ["example_role"], + enabled: true, + rules: { + field: { + "realm.name": "oidc1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10e4c1f246ada8c6b500d8ea6c1e335f.asciidoc b/docs/doc_examples/10e4c1f246ada8c6b500d8ea6c1e335f.asciidoc new file mode 100644 index 000000000..bcb703c37 --- /dev/null +++ b/docs/doc_examples/10e4c1f246ada8c6b500d8ea6c1e335f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + standard_shingle: { + tokenizer: "standard", + filter: ["shingle"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/10f0c8fed98455c460c374b50ffbb204.asciidoc b/docs/doc_examples/10f0c8fed98455c460c374b50ffbb204.asciidoc new file mode 100644 index 000000000..9fecf5000 --- /dev/null +++ b/docs/doc_examples/10f0c8fed98455c460c374b50ffbb204.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate 
the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "dsl-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc b/docs/doc_examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc new file mode 100644 index 000000000..d43215527 --- /dev/null +++ b/docs/doc_examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.list(); +console.log(response); +---- diff --git a/docs/doc_examples/111c31db1fd29baeaa9964eafaea6789.asciidoc b/docs/doc_examples/111c31db1fd29baeaa9964eafaea6789.asciidoc new file mode 100644 index 000000000..50c998961 --- /dev/null +++ b/docs/doc_examples/111c31db1fd29baeaa9964eafaea6789.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "analyst_user", + refresh: "true", + password: "l0nger-r4nd0mer-p@ssw0rd", + roles: ["my_analyst_role"], + full_name: "Monday Jaffe", + metadata: { + innovation: 8, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/111c69ca94162c1523b799a5c14723dd.asciidoc b/docs/doc_examples/111c69ca94162c1523b799a5c14723dd.asciidoc new file mode 100644 index 000000000..d7a3bc0f3 --- /dev/null +++ b/docs/doc_examples/111c69ca94162c1523b799a5c14723dd.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + full_text: "Quick Brown Foxes!", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1147a02afa087278e51fa365fb9e06b7.asciidoc b/docs/doc_examples/1147a02afa087278e51fa365fb9e06b7.asciidoc new file mode 100644 index 000000000..267bf1a6a --- /dev/null +++ b/docs/doc_examples/1147a02afa087278e51fa365fb9e06b7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 1000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/114d470e752efa9672ca68d7290fada8.asciidoc b/docs/doc_examples/114d470e752efa9672ca68d7290fada8.asciidoc new file mode 100644 index 000000000..71741889f --- /dev/null +++ b/docs/doc_examples/114d470e752efa9672ca68d7290fada8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putAlias({ + index: "my-data-stream", + name: "my-alias", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1153bd92ca18356db927054958cd95c6.asciidoc b/docs/doc_examples/1153bd92ca18356db927054958cd95c6.asciidoc new file mode 100644 index 000000000..9b8284bc2 --- /dev/null +++ b/docs/doc_examples/1153bd92ca18356db927054958cd95c6.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + field_value_factor: { + field: 
"my-int", + factor: 1.2, + modifier: "sqrt", + missing: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/115529722ba30b0b0d51a7ff87e59198.asciidoc b/docs/doc_examples/115529722ba30b0b0d51a7ff87e59198.asciidoc new file mode 100644 index 000000000..73710c751 --- /dev/null +++ b/docs/doc_examples/115529722ba30b0b0d51a7ff87e59198.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getRole({ + name: "my_admin_role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/118f249a3b26c33416f641b33f2b74f8.asciidoc b/docs/doc_examples/118f249a3b26c33416f641b33f2b74f8.asciidoc new file mode 100644 index 000000000..0ee01c6b5 --- /dev/null +++ b/docs/doc_examples/118f249a3b26c33416f641b33f2b74f8.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "pattern", + pattern: ",", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "comma,separated,values", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc b/docs/doc_examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc new file mode 100644 index 000000000..9e732ae71 --- /dev/null +++ b/docs/doc_examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", + querystring: { + connector_id: "my-connector-id", + size: "1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/11c395d1649733bcab853fe31ec393b2.asciidoc b/docs/doc_examples/11c395d1649733bcab853fe31ec393b2.asciidoc new file mode 100644 index 000000000..07cbcf1e4 --- /dev/null +++ b/docs/doc_examples/11c395d1649733bcab853fe31ec393b2.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.get(); +console.log(response); +---- diff --git a/docs/doc_examples/11c43c4aa5435f8a99dcc0d1f03c648f.asciidoc b/docs/doc_examples/11c43c4aa5435f8a99dcc0d1f03c648f.asciidoc new file mode 100644 index 000000000..bdc4978ac --- /dev/null +++ b/docs/doc_examples/11c43c4aa5435f8a99dcc0d1f03c648f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + grade_max: { + max: { + field: "grade", + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/11d9043d3050a7175069dec7e0adc963.asciidoc b/docs/doc_examples/11d9043d3050a7175069dec7e0adc963.asciidoc new file mode 100644 index 000000000..6c0308b64 --- /dev/null +++ b/docs/doc_examples/11d9043d3050a7175069dec7e0adc963.asciidoc @@ -0,0 +1,14 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ +  index: "my-index-000001", +  id: 1, +  document: { +    my_field: "a\\b", +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc b/docs/doc_examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc new file mode 100644 index 000000000..10a9f5129 --- /dev/null +++ b/docs/doc_examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deletePipeline({ +  id: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc b/docs/doc_examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc new file mode 100644 index 000000000..7af802e6f --- /dev/null +++ b/docs/doc_examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ +  query: { +    match: { +      "user.id": "kimchy", +    }, +  }, +  highlight: { +    pre_tags: ["<tag1>", "<tag2>"], +    post_tags: ["</tag1>", "</tag2>"], +    fields: { +      body: {}, +    }, +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc b/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc new file mode 100644 index 000000000..7c9de2841 --- /dev/null +++ b/docs/doc_examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.chatCompletionUnified({ +  inference_id: "openai-completion", +  chat_completion_request: { +    model: "gpt-4o", +    messages: [ +      { +        role: "user", +        content: "What is Elastic?", +      }, +    ], +  }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1216f8f7367df3aa823012cef310c08a.asciidoc b/docs/doc_examples/1216f8f7367df3aa823012cef310c08a.asciidoc deleted file mode 100644 index d31d44fa2..000000000 --- a/docs/doc_examples/1216f8f7367df3aa823012cef310c08a.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ -  body: { -    source: { -      index: 'test' -    }, -    dest: { -      index: 'test2' -    }, -    script: { -      source: 'ctx._source.tag = ctx._source.remove("flag")' -    } -  } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1233be1d4c9c7ca54126f1a0693b26de.asciidoc b/docs/doc_examples/1233be1d4c9c7ca54126f1a0693b26de.asciidoc new file mode 100644 index 000000000..47b7b293c --- /dev/null +++ b/docs/doc_examples/1233be1d4c9c7ca54126f1a0693b26de.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ +  index: "my-index-000001", +  id: 3, +  routing: 1, +  refresh: "true", +  document: { +    my_id: "3", +    text: "This is an answer", +    my_join_field: { +      name: "answer", +      parent: "1", +    }, +  }, +}); +console.log(response); + +const response1 = await client.index({ +  index:
"my-index-000001", + id: 4, + routing: 1, + refresh: "true", + document: { + my_id: "4", + text: "This is another answer", + my_join_field: { + name: "answer", + parent: "1", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/123693835b3b85b9a2fa6fd1d3ad89c7.asciidoc b/docs/doc_examples/123693835b3b85b9a2fa6fd1d3ad89c7.asciidoc new file mode 100644 index 000000000..3f96a5c0c --- /dev/null +++ b/docs/doc_examples/123693835b3b85b9a2fa6fd1d3ad89c7.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + routing: "user1", + refresh: "true", + document: { + title: "This is a document", + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "my-index-000001", + id: 1, + routing: "user1", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc b/docs/doc_examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc index b5c874cd0..4471f3a3c 100644 --- a/docs/doc_examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc +++ b/docs/doc_examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.indices.create({ - index: 'publications' -}) -console.log(response) + index: "publications", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/1252fa45847edba5ec2b2f33da70ec5b.asciidoc b/docs/doc_examples/1252fa45847edba5ec2b2f33da70ec5b.asciidoc new file mode 100644 index 000000000..70c2bd9ae --- /dev/null +++ b/docs/doc_examples/1252fa45847edba5ec2b2f33da70ec5b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + filter_path: "routing_table.indices.**.state", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc b/docs/doc_examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc new file mode 100644 index 000000000..bd4b4f7ae --- /dev/null +++ b/docs/doc_examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/128283698535116931dca9d16a16dca2.asciidoc b/docs/doc_examples/128283698535116931dca9d16a16dca2.asciidoc new file mode 100644 index 000000000..5f8f9eb6e --- /dev/null +++ b/docs/doc_examples/128283698535116931dca9d16a16dca2.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getRole(); +console.log(response); +---- diff --git a/docs/doc_examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc b/docs/doc_examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc new file mode 100644 index 000000000..ac47ac1be --- /dev/null +++ b/docs/doc_examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.putSettings({ + settings: { + "index.max_result_window": 5000, + }, +}); +console.log(response); + +const response1 = await client.cluster.putSettings({ + persistent: { + "search.max_buckets": 20000, + "search.allow_expensive_queries": false, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc b/docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc new file mode 100644 index 000000000..83d87f9c6 --- /dev/null +++ b/docs/doc_examples/12adea5d76f73d94d80d42f53f67563f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: ".ml-anomalies-custom-example", + block: "read_only", +}); +console.log(response); +---- diff --git a/docs/doc_examples/12cb446446211f95f651e196a1f059b4.asciidoc b/docs/doc_examples/12cb446446211f95f651e196a1f059b4.asciidoc new file mode 100644 index 000000000..b35ca9af9 --- /dev/null +++ b/docs/doc_examples/12cb446446211f95f651e196a1f059b4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.create({ + repository: "my_repository", + snapshot: "my_snapshot", + wait_for_completion: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc b/docs/doc_examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc new file mode 100644 index 000000000..2cd793b99 --- /dev/null +++ b/docs/doc_examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getCalendarEvents({ + calendar_id: "planned-outages", + start: 1635638400000, + end: 1635724800000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc b/docs/doc_examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc new file mode 100644 index 000000000..ba95e73e9 --- /dev/null +++ b/docs/doc_examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: ".elastic-connectors", + id: "connector_id", + doc: { + features: { + native_connector_api_keys: { + enabled: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/12ec704d62ffedcb03787e6aba69d382.asciidoc b/docs/doc_examples/12ec704d62ffedcb03787e6aba69d382.asciidoc new file mode 100644 index 000000000..46613023e --- /dev/null +++ b/docs/doc_examples/12ec704d62ffedcb03787e6aba69d382.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "stop", + stopwords: ["a"], + }, + { + type: "shingle", + filler_token: "+", + }, + ], + text: "fox jumps a lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/12facf3617a41551ce2f0c4d005cb1c7.asciidoc b/docs/doc_examples/12facf3617a41551ce2f0c4d005cb1c7.asciidoc new file mode 100644 index 
000000000..625db81d8 --- /dev/null +++ b/docs/doc_examples/12facf3617a41551ce2f0c4d005cb1c7.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "movies", + mappings: { + properties: { + name_and_plot: { + type: "text", + }, + name: { + type: "text", + copy_to: "name_and_plot", + }, + plot: { + type: "text", + copy_to: "name_and_plot", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1302e24b0476e0e9af7a2c890edf9f62.asciidoc b/docs/doc_examples/1302e24b0476e0e9af7a2c890edf9f62.asciidoc new file mode 100644 index 000000000..457716ba1 --- /dev/null +++ b/docs/doc_examples/1302e24b0476e0e9af7a2c890edf9f62.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + track_total_hits: false, + query: { + match: { + "user.id": "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1313c540fef7e7c18a066f07789673fc.asciidoc b/docs/doc_examples/1313c540fef7e7c18a066f07789673fc.asciidoc new file mode 100644 index 000000000..8e9ede09f --- /dev/null +++ b/docs/doc_examples/1313c540fef7e7c18a066f07789673fc.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsync({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", + keep_alive: "5d", + wait_for_completion_timeout: "2s", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/132ea3d5a0ffb6b5203e356e8329f679.asciidoc b/docs/doc_examples/132ea3d5a0ffb6b5203e356e8329f679.asciidoc new file mode 100644 index 000000000..0f5963660 --- /dev/null +++ b/docs/doc_examples/132ea3d5a0ffb6b5203e356e8329f679.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc b/docs/doc_examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc new file mode 100644 index 000000000..222bc95f8 --- /dev/null +++ b/docs/doc_examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "logs-debug", + document: { + date: "2019-12-12", + message: "Starting up Elasticsearch", + level: "debug", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "logs-debug", + document: { + date: "2019-12-12", + message: "Starting up Elasticsearch", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/135819da3a4bde684357c57a49ad8e85.asciidoc b/docs/doc_examples/135819da3a4bde684357c57a49ad8e85.asciidoc new file mode 100644 index 000000000..c5583614b --- /dev/null +++ 
b/docs/doc_examples/135819da3a4bde684357c57a49ad8e85.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.migration.deprecations(); +console.log(response); +---- diff --git a/docs/doc_examples/13670d1534125831c2059eebd86d840c.asciidoc b/docs/doc_examples/13670d1534125831c2059eebd86d840c.asciidoc new file mode 100644 index 000000000..080608e5e --- /dev/null +++ b/docs/doc_examples/13670d1534125831c2059eebd86d840c.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "brazilian_example", + settings: { + analysis: { + filter: { + brazilian_stop: { + type: "stop", + stopwords: "_brazilian_", + }, + brazilian_keywords: { + type: "keyword_marker", + keywords: ["exemplo"], + }, + brazilian_stemmer: { + type: "stemmer", + language: "brazilian", + }, + }, + analyzer: { + rebuilt_brazilian: { + tokenizer: "standard", + filter: [ + "lowercase", + "brazilian_stop", + "brazilian_keywords", + "brazilian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/136ae86b8d497dda799cf1cb583df929.asciidoc b/docs/doc_examples/136ae86b8d497dda799cf1cb583df929.asciidoc new file mode 100644 index 000000000..0e065a362 --- /dev/null +++ b/docs/doc_examples/136ae86b8d497dda799cf1cb583df929.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "publications", + mappings: { + properties: { + id: { + type: "text", + }, + title: { + type: "text", + }, + abstract: { + type: "text", + }, + author: { + properties: { + id: { + type: "text", + }, + name: { + type: "text", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/137709a0a0dc38d6094291c9fc75b804.asciidoc b/docs/doc_examples/137709a0a0dc38d6094291c9fc75b804.asciidoc new file mode 100644 index 000000000..47b2e1b27 --- /dev/null +++ b/docs/doc_examples/137709a0a0dc38d6094291c9fc75b804.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + counter: 1, + tags: ["production"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/137c62a4443bdd7d5b95a15022a9dc30.asciidoc b/docs/doc_examples/137c62a4443bdd7d5b95a15022a9dc30.asciidoc new file mode 100644 index 000000000..3c4e2ec94 --- /dev/null +++ b/docs/doc_examples/137c62a4443bdd7d5b95a15022a9dc30.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "arabic_example", + settings: { + analysis: { + filter: { + arabic_stop: { + type: "stop", + stopwords: "_arabic_", + }, + arabic_keywords: { + type: "keyword_marker", + keywords: ["مثال"], + }, + arabic_stemmer: { + type: "stemmer", + language: "arabic", + }, + }, + analyzer: { + rebuilt_arabic: { + tokenizer: "standard", + filter: [ + "lowercase", + "decimal_digit", + "arabic_stop", + "arabic_normalization", + 
"arabic_keywords", + "arabic_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/138ccd89f72aa7502dd9578403dcc589.asciidoc b/docs/doc_examples/138ccd89f72aa7502dd9578403dcc589.asciidoc deleted file mode 100644 index d6812ed4b..000000000 --- a/docs/doc_examples/138ccd89f72aa7502dd9578403dcc589.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'twitter', - id: '0', - _source: 'false' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc b/docs/doc_examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc new file mode 100644 index 000000000..bb8e802e8 --- /dev/null +++ b/docs/doc_examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + routing: "user1", + document: { + counter: 1, + tags: ["env2"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13917f7cfb6a382c293275ff71134ec4.asciidoc b/docs/doc_examples/13917f7cfb6a382c293275ff71134ec4.asciidoc new file mode 100644 index 000000000..73c11f5ef --- /dev/null +++ b/docs/doc_examples/13917f7cfb6a382c293275ff71134ec4.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + message: + "Hello {{#name_exists}}{{query_string}}{{/name_exists}}{{^name_exists}}World{{/name_exists}}", + }, + }, + }, + params: { + query_string: "Kimchy", + name_exists: true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13b02da42d3afe7f0b649e1c98ac9549.asciidoc b/docs/doc_examples/13b02da42d3afe7f0b649e1c98ac9549.asciidoc new file mode 100644 index 000000000..2418f9714 --- /dev/null +++ b/docs/doc_examples/13b02da42d3afe7f0b649e1c98ac9549.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "keep_types_example", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["extract_alpha"], + }, + }, + filter: { + extract_alpha: { + type: "keep_types", + types: [""], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13cc51ca3a783cdbb1f1d353eaedbf23.asciidoc b/docs/doc_examples/13cc51ca3a783cdbb1f1d353eaedbf23.asciidoc new file mode 100644 index 000000000..86959a78e --- /dev/null +++ b/docs/doc_examples/13cc51ca3a783cdbb1f1d353eaedbf23.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.xpack.security.authc": "debug", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc b/docs/doc_examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc new file mode 100644 index 000000000..e7f6c16c0 --- /dev/null +++ 
b/docs/doc_examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + should: [ + { + match: { + "name.first": { + query: "shay", + _name: "first", + }, + }, + }, + { + match: { + "name.last": { + query: "banon", + _name: "last", + }, + }, + }, + ], + filter: { + terms: { + "name.last": ["banon", "kimchy"], + _name: "test", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13d91782399ba1f291e103c18b5338cc.asciidoc b/docs/doc_examples/13d91782399ba1f291e103c18b5338cc.asciidoc new file mode 100644 index 000000000..c8f218cc1 --- /dev/null +++ b/docs/doc_examples/13d91782399ba1f291e103c18b5338cc.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: "my-index", + dest: "my-new-index", + create_from: { + settings_override: { + index: { + number_of_shards: 5, + }, + }, + mappings_override: { + properties: { + field2: { + type: "boolean", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/13df08eefc9ba98e311793bbca74133b.asciidoc b/docs/doc_examples/13df08eefc9ba98e311793bbca74133b.asciidoc new file mode 100644 index 000000000..0e818c24a --- /dev/null +++ b/docs/doc_examples/13df08eefc9ba98e311793bbca74133b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUserProfile({ + uid: "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", + data: "app1.key1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/13e3fefbf55f672926aa389d76fc8bea.asciidoc b/docs/doc_examples/13e3fefbf55f672926aa389d76fc8bea.asciidoc new file mode 100644 index 000000000..eacd1ceab --- /dev/null +++ b/docs/doc_examples/13e3fefbf55f672926aa389d76fc8bea.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.changePassword({ + username: "user1", + password: "new-test-password", +}); +console.log(response); +---- diff --git a/docs/doc_examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc b/docs/doc_examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc new file mode 100644 index 000000000..0d8d85d36 --- /dev/null +++ b/docs/doc_examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.recovery({ + index: "index1,index2", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc b/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc new file mode 100644 index 000000000..cb18160a8 --- /dev/null +++ b/docs/doc_examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.sparseEmbedding({ + inference_id: 
"my-elser-model", + input: + "The sky above the port was the color of television tuned to a dead channel.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/13fe12cdb73bc89f07a83f1e6b127511.asciidoc b/docs/doc_examples/13fe12cdb73bc89f07a83f1e6b127511.asciidoc new file mode 100644 index 000000000..672620810 --- /dev/null +++ b/docs/doc_examples/13fe12cdb73bc89f07a83f1e6b127511.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "google-vertex-ai-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 768, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc new file mode 100644 index 000000000..5387dbba3 --- /dev/null +++ b/docs/doc_examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.update({ + inference_id: "my-inference-endpoint", + inference_config: { + service_settings: { + api_key: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/14254a0e725044faedf9370ead76f6ce.asciidoc b/docs/doc_examples/14254a0e725044faedf9370ead76f6ce.asciidoc new file mode 100644 index 000000000..88f3d5fcc --- /dev/null +++ b/docs/doc_examples/14254a0e725044faedf9370ead76f6ce.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + q: "user.id:elkbee", + size: 0, + terminate_after: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/142de21c40e84e2e2d8d832e5b3b36db.asciidoc b/docs/doc_examples/142de21c40e84e2e2d8d832e5b3b36db.asciidoc new file mode 100644 index 000000000..0dfc47570 --- /dev/null +++ b/docs/doc_examples/142de21c40e84e2e2d8d832e5b3b36db.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.migrateToDataTiers(); +console.log(response); +---- diff --git a/docs/doc_examples/1445ca2e813ed1c25504107b4b11760e.asciidoc b/docs/doc_examples/1445ca2e813ed1c25504107b4b11760e.asciidoc new file mode 100644 index 000000000..f4e6078f1 --- /dev/null +++ b/docs/doc_examples/1445ca2e813ed1c25504107b4b11760e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.follow({ + index: "server-metrics-follower", + wait_for_active_shards: 1, + remote_cluster: "leader", + leader_index: "server-metrics", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1452829804551d2d6acedd4e73b29637.asciidoc b/docs/doc_examples/1452829804551d2d6acedd4e73b29637.asciidoc new file mode 100644 index 000000000..782e6ffea --- /dev/null +++ b/docs/doc_examples/1452829804551d2d6acedd4e73b29637.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createDataStream({ + name: "logs-foo-bar", +}); +console.log(response); +---- diff --git a/docs/doc_examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc b/docs/doc_examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc new file mode 100644 index 000000000..9924e6b33 --- /dev/null +++ b/docs/doc_examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.tasks({ + v: "true", + s: "time:desc", + h: "type,action,running_time,node,cancellable", +}); +console.log(response); +---- diff --git a/docs/doc_examples/14701dcc0cca9665fce2aace0cb62af7.asciidoc b/docs/doc_examples/14701dcc0cca9665fce2aace0cb62af7.asciidoc deleted file mode 100644 index 7bd77883a..000000000 --- a/docs/doc_examples/14701dcc0cca9665fce2aace0cb62af7.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'twitter', - size: '0', - filter_path: 'hits.total', - body: { - query: { - range: { - likes: { - lt: 10 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc b/docs/doc_examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc new file mode 100644 index 000000000..8c45eebd2 --- /dev/null +++ b/docs/doc_examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putTrainedModelAlias({ + model_id: "flight-delay-prediction-1574775339910", + model_alias: "flight_delay_model", +}); +console.log(response); +---- diff --git a/docs/doc_examples/148edc235fcfbc263561f87f5533e688.asciidoc b/docs/doc_examples/148edc235fcfbc263561f87f5533e688.asciidoc new file mode 100644 index 000000000..a19fe9d8f --- /dev/null +++ b/docs/doc_examples/148edc235fcfbc263561f87f5533e688.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + percolate: { + field: "query", + documents: [ + { + message: "bonsai tree", + }, + { + message: "new tree", + }, + { + message: "the office", + }, + { + message: "office tree", + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/14936b96cfb8ff999a833f615ba75495.asciidoc b/docs/doc_examples/14936b96cfb8ff999a833f615ba75495.asciidoc new file mode 100644 index 000000000..3bf2abb00 --- /dev/null +++ b/docs/doc_examples/14936b96cfb8ff999a833f615ba75495.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "bicycles,other_cycles", + query: { + bool: { + must: { + match: { + description: "dutch", + }, + }, + filter: { + term: { + cycle_type: "bicycle", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/149a0eea54cdf6ea3052af6dba2d2a63.asciidoc 
b/docs/doc_examples/149a0eea54cdf6ea3052af6dba2d2a63.asciidoc new file mode 100644 index 000000000..3624fa48e --- /dev/null +++ b/docs/doc_examples/149a0eea54cdf6ea3052af6dba2d2a63.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + set_priority: { + priority: 50, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/14a1db30e13eb1d03cfd9710ca847ebb.asciidoc b/docs/doc_examples/14a1db30e13eb1d03cfd9710ca847ebb.asciidoc new file mode 100644 index 000000000..eb061fc6a --- /dev/null +++ b/docs/doc_examples/14a1db30e13eb1d03cfd9710ca847ebb.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-data-stream", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:21:15.000Z", + message: + '192.0.2.42 - - [06/May/2099:16:21:15 +0000] "GET /images/bg.jpg HTTP/1.0" 200 24736', + }, + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:25:42.000Z", + message: + '192.0.2.255 - - [06/May/2099:16:25:42 +0000] "GET /favicon.ico HTTP/1.0" 200 3638', + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/14a33c364873c2f930ca83d0a3005389.asciidoc b/docs/doc_examples/14a33c364873c2f930ca83d0a3005389.asciidoc new file mode 100644 index 000000000..fa4c966b7 --- /dev/null +++ b/docs/doc_examples/14a33c364873c2f930ca83d0a3005389.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + index: "my-index", + shard: 0, + primary: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/14af7e2899e64f231068bded6aaf9ec5.asciidoc b/docs/doc_examples/14af7e2899e64f231068bded6aaf9ec5.asciidoc new file mode 100644 index 000000000..a9bb73e81 --- /dev/null +++ b/docs/doc_examples/14af7e2899e64f231068bded6aaf9ec5.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + document: { + username: "marywhite", + email: "mary@white.com", + name: { + first: "Mary", + middle: "Alice", + last: "White", + }, + }, +}); +console.log(response); + +const response1 = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/14afe65afee3d43f27aaaa5b37f26a31.asciidoc b/docs/doc_examples/14afe65afee3d43f27aaaa5b37f26a31.asciidoc new file mode 100644 index 000000000..c2989f48c --- /dev/null +++ b/docs/doc_examples/14afe65afee3d43f27aaaa5b37f26a31.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "Point", + coordinates: [-77.03653, 38.897676], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/14b81f96297952970b78a3216e059596.asciidoc 
b/docs/doc_examples/14b81f96297952970b78a3216e059596.asciidoc new file mode 100644 index 000000000..ad822c276 --- /dev/null +++ b/docs/doc_examples/14b81f96297952970b78a3216e059596.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.get({ + id: "FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/14f124294a4a0e3a657d1468c36161cd.asciidoc b/docs/doc_examples/14f124294a4a0e3a657d1468c36161cd.asciidoc new file mode 100644 index 000000000..57cb958e6 --- /dev/null +++ b/docs/doc_examples/14f124294a4a0e3a657d1468c36161cd.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stats-index", + query: { + term: { + agg_metric: { + value: 702.3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/14f2dab0583c5a9fcc39931d33194872.asciidoc b/docs/doc_examples/14f2dab0583c5a9fcc39931d33194872.asciidoc new file mode 100644 index 000000000..2c76f8696 --- /dev/null +++ b/docs/doc_examples/14f2dab0583c5a9fcc39931d33194872.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sample_weblogs_by_clientip", +}); +console.log(response); +---- diff --git a/docs/doc_examples/150b5fee5678bf8cdf0932da73eada80.asciidoc b/docs/doc_examples/150b5fee5678bf8cdf0932da73eada80.asciidoc new file mode 100644 index 000000000..f87ba72dc --- /dev/null +++ b/docs/doc_examples/150b5fee5678bf8cdf0932da73eada80.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "indices", + index_metric: "fielddata", + fields: "field1,field2", +}); +console.log(response); + +const response1 = await client.nodes.stats({ + metric: "indices", + index_metric: "fielddata", + level: "indices", + fields: "field1,field2", +}); +console.log(response1); + +const response2 = await client.nodes.stats({ + metric: "indices", + index_metric: "fielddata", + level: "shards", + fields: "field1,field2", +}); +console.log(response2); + +const response3 = await client.nodes.stats({ + metric: "indices", + index_metric: "fielddata", + fields: "field*", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/151d2b11807ec684b0c01aa89189a801.asciidoc b/docs/doc_examples/151d2b11807ec684b0c01aa89189a801.asciidoc new file mode 100644 index 000000000..90c13827f --- /dev/null +++ b/docs/doc_examples/151d2b11807ec684b0c01aa89189a801.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["title", "content"], + query: "this that thus", + minimum_should_match: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1522a9297151d7046e6345b9b27539ca.asciidoc b/docs/doc_examples/1522a9297151d7046e6345b9b27539ca.asciidoc new file mode 100644 index 000000000..2dab45f96 --- /dev/null +++ 
b/docs/doc_examples/1522a9297151d7046e6345b9b27539ca.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateConfiguration({ + connector_id: "my-connector-id", + values: { + host: "127.0.0.1", + port: 5432, + username: "myuser", + password: "mypassword", + database: "chinook", + schema: "public", + tables: "album,artist", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/154d703732daf5c5fcd0122e6a50213f.asciidoc b/docs/doc_examples/154d703732daf5c5fcd0122e6a50213f.asciidoc new file mode 100644 index 000000000..5f79c3ef0 --- /dev/null +++ b/docs/doc_examples/154d703732daf5c5fcd0122e6a50213f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + "measures.start": { + type: "long", + }, + "measures.end": { + type: "long", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/156bc64c94f9f3334fbce25165d2286a.asciidoc b/docs/doc_examples/156bc64c94f9f3334fbce25165d2286a.asciidoc new file mode 100644 index 000000000..84f57ba66 --- /dev/null +++ b/docs/doc_examples/156bc64c94f9f3334fbce25165d2286a.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + "sort.field": "date", + "sort.order": "desc", + }, + }, + mappings: { + properties: { + date: { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1570976f7807b88dc8a046b833be057b.asciidoc b/docs/doc_examples/1570976f7807b88dc8a046b833be057b.asciidoc new file mode 100644 index 000000000..64b22d4b2 --- /dev/null +++ b/docs/doc_examples/1570976f7807b88dc8a046b833be057b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + s: "master,name", + h: "name,master,node.role,heap.percent,disk.used_percent,cpu", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1572696b97822d3332be51700e09672f.asciidoc b/docs/doc_examples/1572696b97822d3332be51700e09672f.asciidoc new file mode 100644 index 000000000..45ae77442 --- /dev/null +++ b/docs/doc_examples/1572696b97822d3332be51700e09672f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "range_index", + query: { + range: { + time_frame: { + gte: "2015-10-31", + lte: "2015-11-01", + relation: "within", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1577e6e806b3283c9e99f1596d310754.asciidoc b/docs/doc_examples/1577e6e806b3283c9e99f1596d310754.asciidoc deleted file mode 100644 index cf35f86a2..000000000 --- a/docs/doc_examples/1577e6e806b3283c9e99f1596d310754.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await 
client.index({ - index: 'test', - id: '1', - refresh: true, - body: { - text: 'words words', - flag: 'foo' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1598a0fec6b1ca78cadbaba65f465196.asciidoc b/docs/doc_examples/1598a0fec6b1ca78cadbaba65f465196.asciidoc new file mode 100644 index 000000000..739e06130 --- /dev/null +++ b/docs/doc_examples/1598a0fec6b1ca78cadbaba65f465196.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "pattern", + pattern: '"((?:\\\\"|[^"]|\\\\")+)"', + group: 1, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: '"value", "value with embedded \\" quote"', +}); +console.log(response1); +---- diff --git a/docs/doc_examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc b/docs/doc_examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc new file mode 100644 index 000000000..66c81a93b --- /dev/null +++ b/docs/doc_examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + filter: ["lowercase"], + text: "BaR", +}); +console.log(response); +---- diff --git a/docs/doc_examples/15ac33d641b376d9494075eb1f0d4066.asciidoc b/docs/doc_examples/15ac33d641b376d9494075eb1f0d4066.asciidoc new file mode 100644 index 000000000..73da91b2d --- /dev/null +++ b/docs/doc_examples/15ac33d641b376d9494075eb1f0d4066.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.cancelMigrateReindex({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/15c76cc8a038f686395053a240262929.asciidoc b/docs/doc_examples/15c76cc8a038f686395053a240262929.asciidoc new file mode 100644 index 000000000..8e3ec71ae --- /dev/null +++ b/docs/doc_examples/15c76cc8a038f686395053a240262929.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "classic_example", + settings: { + analysis: { + analyzer: { + classic_analyzer: { + tokenizer: "classic", + filter: ["classic"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/15d4be58359542775f4aff88e6d8adb5.asciidoc b/docs/doc_examples/15d4be58359542775f4aff88e6d8adb5.asciidoc new file mode 100644 index 000000000..4cc3244aa --- /dev/null +++ b/docs/doc_examples/15d4be58359542775f4aff88e6d8adb5.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + id: "my-pipeline", + docs: [ + { + _source: { + "my-keyword-field": "FOO", + }, + }, + { + _source: { + "my-keyword-field": "BAR", + }, + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/15d948d593d2624ac5e2b155052048f0.asciidoc b/docs/doc_examples/15d948d593d2624ac5e2b155052048f0.asciidoc new file mode 100644 index 000000000..882a10ec9 --- /dev/null +++ b/docs/doc_examples/15d948d593d2624ac5e2b155052048f0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["keyword_repeat", "stemmer"], + text: "jumping dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/15dad5338065baaaa7d475abe85f4c22.asciidoc b/docs/doc_examples/15dad5338065baaaa7d475abe85f4c22.asciidoc deleted file mode 100644 index 4e604ffd1..000000000 --- a/docs/doc_examples/15dad5338065baaaa7d475abe85f4c22.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': [ - -70, - 40 - ], - order: 'asc', - unit: 'km' - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/15e90b82827c8512670820cf856a9c71.asciidoc b/docs/doc_examples/15e90b82827c8512670820cf856a9c71.asciidoc new file mode 100644 index 000000000..3cfb23d78 --- /dev/null +++ b/docs/doc_examples/15e90b82827c8512670820cf856a9c71.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "monthlyindex", + description: "monthly date-time index naming", + processors: [ + { + date_index_name: { + field: "date1", + index_name_prefix: "my-index-", + date_rounding: "M", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc b/docs/doc_examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc new file mode 100644 index 000000000..0db865fe6 --- /dev/null +++ b/docs/doc_examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-bit-vectors", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "dotProduct(params.query_vector, 'my_dense_vector')", + params: { + query_vector: [ + 0.23, 1.45, 3.67, 4.89, -0.56, 2.34, 3.21, 1.78, -2.45, 0.98, -0.12, + 3.45, 4.56, 2.78, 1.23, 0.67, 3.89, 4.12, -2.34, 1.56, 0.78, 3.21, + 4.12, 2.45, -1.67, 0.34, -3.45, 4.56, -2.78, 1.23, -0.67, 3.89, + -4.34, 2.12, -1.56, 0.78, -3.21, 4.45, 2.12, 1.67, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1605be45a5711d1929d6ad2d1ae0f797.asciidoc b/docs/doc_examples/1605be45a5711d1929d6ad2d1ae0f797.asciidoc new file mode 100644 index 000000000..cd712f029 --- /dev/null +++ b/docs/doc_examples/1605be45a5711d1929d6ad2d1ae0f797.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + filter_path: "metadata.cluster_coordination.last_committed_config", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/160de80948e0c7db49b1c311848a66a2.asciidoc b/docs/doc_examples/160de80948e0c7db49b1c311848a66a2.asciidoc new file mode 100644 index 000000000..467ff2e89 --- /dev/null +++ b/docs/doc_examples/160de80948e0c7db49b1c311848a66a2.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_error_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + search: { + request: { + indices: ["logs"], + body: { + query: { + match: { + message: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + log_error: { + logging: { + text: "Found {{ctx.payload.hits.total}} errors in the logs", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/160f39a50847bad0be4be1529a95e4ce.asciidoc b/docs/doc_examples/160f39a50847bad0be4be1529a95e4ce.asciidoc new file mode 100644 index 000000000..39f9d4449 --- /dev/null +++ b/docs/doc_examples/160f39a50847bad0be4be1529a95e4ce.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "irish_example", + settings: { + analysis: { + filter: { + irish_hyphenation: { + type: "stop", + stopwords: ["h", "n", "t"], + ignore_case: true, + }, + irish_elision: { + type: "elision", + articles: ["d", "m", "b"], + articles_case: true, + }, + irish_stop: { + type: "stop", + stopwords: "_irish_", + }, + irish_lowercase: { + type: "lowercase", + language: "irish", + }, + irish_keywords: { + type: "keyword_marker", + keywords: ["sampla"], + }, + irish_stemmer: { + type: "stemmer", + language: "irish", + }, + }, + analyzer: { + rebuilt_irish: { + tokenizer: "standard", + filter: [ + "irish_hyphenation", + "irish_elision", + "irish_lowercase", + "irish_stop", + "irish_keywords", + "irish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/16239fe9f0b0dcfd5ea64c08c6fed21d.asciidoc b/docs/doc_examples/16239fe9f0b0dcfd5ea64c08c6fed21d.asciidoc new file mode 100644 index 000000000..f045eced9 --- /dev/null +++ b/docs/doc_examples/16239fe9f0b0dcfd5ea64c08c6fed21d.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "issues", + mappings: { + properties: { + tags: { + type: "keyword", + }, + comments: { + type: "nested", + properties: { + username: { + type: "keyword", + }, + comment: { + type: "text", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/162b5b693b713f0bfab1209d59443c46.asciidoc b/docs/doc_examples/162b5b693b713f0bfab1209d59443c46.asciidoc index e110fd622..be7b89c0b 100644 --- a/docs/doc_examples/162b5b693b713f0bfab1209d59443c46.asciidoc +++ b/docs/doc_examples/162b5b693b713f0bfab1209d59443c46.asciidoc @@ -4,18 +4,15 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - constant_score: { - filter: { - term: { - status: 'active' - } - } - } - } - } -}) -console.log(response) + query: { + constant_score: { + filter: { + term: { + status: "active", + }, + }, + }, + }, +}); +console.log(response); ---- - diff --git 
a/docs/doc_examples/16351d99d0608789d04a0bb11a537098.asciidoc b/docs/doc_examples/16351d99d0608789d04a0bb11a537098.asciidoc new file mode 100644 index 000000000..88500c080 --- /dev/null +++ b/docs/doc_examples/16351d99d0608789d04a0bb11a537098.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "edge_ngram_example", + settings: { + analysis: { + analyzer: { + standard_edge_ngram: { + tokenizer: "standard", + filter: ["edge_ngram"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc b/docs/doc_examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc new file mode 100644 index 000000000..da530b1a9 --- /dev/null +++ b/docs/doc_examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + s: "cpu:desc", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1648dd31d0fef01e7504ebeb687f4f30.asciidoc b/docs/doc_examples/1648dd31d0fef01e7504ebeb687f4f30.asciidoc new file mode 100644 index 000000000..0176b01af --- /dev/null +++ b/docs/doc_examples/1648dd31d0fef01e7504ebeb687f4f30.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + url: "https://en.wikipedia.org/wiki/2016_Summer_Olympics", + content: "Rio 2016", + pagerank: 50.3, + url_length: 42, + topics: { + sports: 50, + brazil: 30, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 2, + refresh: "true", + document: { + url: "https://en.wikipedia.org/wiki/2016_Brazilian_Grand_Prix", + content: "Formula One motor race held on 13 November 2016", + pagerank: 50.3, + url_length: 47, + topics: { + sports: 35, + "formula one": 65, + brazil: 20, + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "test", + id: 3, + refresh: "true", + document: { + url: "https://en.wikipedia.org/wiki/Deadpool_(film)", + content: "Deadpool is a 2016 American superhero film", + pagerank: 50.3, + url_length: 37, + topics: { + movies: 60, + "super hero": 65, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/16535685833419f0033545ffce4fdf00.asciidoc b/docs/doc_examples/16535685833419f0033545ffce4fdf00.asciidoc new file mode 100644 index 000000000..450c4fb80 --- /dev/null +++ b/docs/doc_examples/16535685833419f0033545ffce4fdf00.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index2", + query: { + query_string: { + query: "running with scissors", + fields: ["comment", "comment.english"], + }, + }, + highlight: { + order: "score", + fields: { + "comment.english": { + type: "fvh", + matched_fields: ["comment"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1659420311d907d9fc024b96f4150216.asciidoc b/docs/doc_examples/1659420311d907d9fc024b96f4150216.asciidoc
new file mode 100644 index 000000000..01be05459 --- /dev/null +++ b/docs/doc_examples/1659420311d907d9fc024b96f4150216.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "length", + min: 0, + max: 4, + }, + ], + text: "the quick brown fox jumps over the lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc b/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc new file mode 100644 index 000000000..8771d32f7 --- /dev/null +++ b/docs/doc_examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my-example-app", + search_application: { + indices: ["example-index"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "highlight": {\n "fields": {\n "title": { "fragment_size": 0 },\n "plot": { "fragment_size": 200 }\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', + params: { + query: "", + _es_filters: {}, + _es_aggs: {}, + _es_sort_fields: {}, + size: 10, + from: 0, + }, + dictionary: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc b/docs/doc_examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc new file mode 100644 index 000000000..d8c815fd5 --- /dev/null +++ b/docs/doc_examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list(); +console.log(response); + +const response1 = await client.tasks.list({ + nodes: "nodeId1,nodeId2", +}); +console.log(response1); + +const response2 = await client.tasks.list({ + nodes: "nodeId1,nodeId2", + actions: "cluster:*", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/168bfdde773570cfc6dd3ab3574e413b.asciidoc b/docs/doc_examples/168bfdde773570cfc6dd3ab3574e413b.asciidoc deleted file mode 100644 index 66af05e4f..000000000 --- a/docs/doc_examples/168bfdde773570cfc6dd3ab3574e413b.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - q: 'user:kimchy' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc b/docs/doc_examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc new file mode 100644 index 000000000..5b89ae747 --- /dev/null +++ b/docs/doc_examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", + id: 1, + fields: ["text"], + offsets: true, + 
payloads: true, + positions: true, + term_statistics: true, + field_statistics: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/169b39bb889ecd47541bed3e48725488.asciidoc b/docs/doc_examples/169b39bb889ecd47541bed3e48725488.asciidoc new file mode 100644 index 000000000..2f62a2605 --- /dev/null +++ b/docs/doc_examples/169b39bb889ecd47541bed3e48725488.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "bug_reports", + query: { + term: { + labels: "urgent", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc b/docs/doc_examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc new file mode 100644 index 000000000..72589cd8f --- /dev/null +++ b/docs/doc_examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "books", +}); +console.log(response); + +const response1 = await client.indices.delete({ + index: "my-explicit-mappings-books", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/170c8a3fb81a4e93cd3034a3b5a43ac9.asciidoc b/docs/doc_examples/170c8a3fb81a4e93cd3034a3b5a43ac9.asciidoc new file mode 100644 index 000000000..e9bbbd34f --- /dev/null +++ b/docs/doc_examples/170c8a3fb81a4e93cd3034a3b5a43ac9.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test", + id: 1, + document: { + location: { + coordinates: [ + [46.25, 20.14], + [47.49, 19.04], + ], + type: "multipoint", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/172155ca4bf6dfcbd489453f50739396.asciidoc b/docs/doc_examples/172155ca4bf6dfcbd489453f50739396.asciidoc new file mode 100644 index 000000000..e380107dc --- /dev/null +++ b/docs/doc_examples/172155ca4bf6dfcbd489453f50739396.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "snapshot*", + size: 2, + sort: "name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/17266cee5eaaddf08e5534bf580a1910.asciidoc b/docs/doc_examples/17266cee5eaaddf08e5534bf580a1910.asciidoc new file mode 100644 index 000000000..cf5d15d02 --- /dev/null +++ b/docs/doc_examples/17266cee5eaaddf08e5534bf580a1910.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.stats(); +console.log(response); +---- diff --git a/docs/doc_examples/172b18e435c400bed85227624de3acfd.asciidoc b/docs/doc_examples/172b18e435c400bed85227624de3acfd.asciidoc new file mode 100644 index 000000000..a13d0a955 --- /dev/null +++ b/docs/doc_examples/172b18e435c400bed85227624de3acfd.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + 
name: "my_analyst_role", + refresh: "true", + cluster: ["monitor"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["manage"], + }, + ], + applications: [ + { + application: "myapp", + privileges: ["read"], + resources: ["*"], + }, + ], + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/172d150e56a225155a62c7b18bf8da67.asciidoc b/docs/doc_examples/172d150e56a225155a62c7b18bf8da67.asciidoc new file mode 100644 index 000000000..a4d21847f --- /dev/null +++ b/docs/doc_examples/172d150e56a225155a62c7b18bf8da67.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: + "SELECT YEAR(release_date) AS year FROM library WHERE page_count > 300 AND author = 'Frank Herbert' GROUP BY year HAVING COUNT(*) > 0", +}); +console.log(response); +---- diff --git a/docs/doc_examples/17316a81c9dbdd120b7754116bf0461c.asciidoc b/docs/doc_examples/17316a81c9dbdd120b7754116bf0461c.asciidoc new file mode 100644 index 000000000..13415ca5b --- /dev/null +++ b/docs/doc_examples/17316a81c9dbdd120b7754116bf0461c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-connector-api-key", + role_descriptors: { + "my-connector-connector-role": { + cluster: ["monitor", "manage_connector"], + indices: [ + { + names: [ + "my-index_name", + ".search-acl-filter-my-index_name", + ".elastic-connectors*", + ], + privileges: ["all"], + allow_restricted_indices: false, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1736545c8b5674f6d311f3277eb387f1.asciidoc b/docs/doc_examples/1736545c8b5674f6d311f3277eb387f1.asciidoc new file mode 100644 index 000000000..de3df9694 --- /dev/null +++ b/docs/doc_examples/1736545c8b5674f6d311f3277eb387f1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "my-data-stream", + data_retention: "30d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/173b190078621415a80e851eaf794e8a.asciidoc b/docs/doc_examples/173b190078621415a80e851eaf794e8a.asciidoc new file mode 100644 index 000000000..f09c6d4ed --- /dev/null +++ b/docs/doc_examples/173b190078621415a80e851eaf794e8a.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_english_analyzer: { + type: "standard", + max_token_length: 5, + stopwords: "_english_", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_english_analyzer", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc b/docs/doc_examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc new file mode 100644 index 000000000..99a0f8861 --- /dev/null +++ 
b/docs/doc_examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.delegatePki({ + x509_certificate_chain: [ + "MIIDeDCCAmCgAwIBAgIUBzj/nGGKxP2iXawsSquHmQjCJmMwDQYJKoZIhvcNAQELBQAwUzErMCkGA1UEAxMiRWxhc3RpY3NlYXJjaCBUZXN0IEludGVybWVkaWF0ZSBDQTEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMB4XDTIzMDcxODE5MjkwNloXDTQzMDcxMzE5MjkwNlowSjEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAllHL4pQkkfwAm/oLkxYYO+r950DEy1bjH+4viCHzNADLCTWO+lOZJVlNx7QEzJE3QGMdif9CCBBxQFMapA7oUFCLq84fPSQQu5AnvvbltVD9nwVtCs+9ZGDjMKsz98RhSLMFIkxdxi6HkQ3Lfa4ZSI4lvba4oo+T/GveazBDS+NgmKyq00EOXt3tWi1G9vEVItommzXWfv0agJWzVnLMldwkPqsw0W7zrpyT7FZS4iLbQADGceOW8fiauOGMkscu9zAnDR/SbWl/chYioQOdw6ndFLn1YIFPd37xL0WsdsldTpn0vH3YfzgLMffT/3P6YlwBegWzsx6FnM/93Ecb4wIDAQABo00wSzAJBgNVHRMEAjAAMB0GA1UdDgQWBBQKNRwjW+Ad/FN1Rpoqme/5+jrFWzAfBgNVHSMEGDAWgBRcya0c0x/PaI7MbmJVIylWgLqXNjANBgkqhkiG9w0BAQsFAAOCAQEACZ3PF7Uqu47lplXHP6YlzYL2jL0D28hpj5lGtdha4Muw1m/BjDb0Pu8l0NQ1z3AP6AVcvjNDkQq6Y5jeSz0bwQlealQpYfo7EMXjOidrft1GbqOMFmTBLpLA9SvwYGobSTXWTkJzonqVaTcf80HpMgM2uEhodwTcvz6v1WEfeT/HMjmdIsq4ImrOL9RNrcZG6nWfw0HR3JNOgrbfyEztEI471jHznZ336OEcyX7gQuvHE8tOv5+oD1d7s3Xg1yuFp+Ynh+FfOi3hPCuaHA+7F6fLmzMDLVUBAllugst1C3U+L/paD7tqIa4ka+KNPCbSfwazmJrt4XNiivPR4hwH5g==", + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/17566e23c191f1004a2719f2c4242307.asciidoc b/docs/doc_examples/17566e23c191f1004a2719f2c4242307.asciidoc new file mode 100644 index 000000000..1cd460618 --- /dev/null +++ b/docs/doc_examples/17566e23c191f1004a2719f2c4242307.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.getAutoscalingCapacity(); +console.log(response); +---- diff --git a/docs/doc_examples/178be73b74ba9f297429e32267084ac7.asciidoc b/docs/doc_examples/178be73b74ba9f297429e32267084ac7.asciidoc new file mode 100644 index 000000000..2d564eeb6 --- /dev/null +++ b/docs/doc_examples/178be73b74ba9f297429e32267084ac7.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_or: { + clauses: [ + { + span_term: { + field: "value1", + }, + }, + { + span_term: { + field: "value2", + }, + }, + { + span_term: { + field: "value3", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/178c920d5e8ec0071f77290fa059802c.asciidoc b/docs/doc_examples/178c920d5e8ec0071f77290fa059802c.asciidoc new file mode 100644 index 000000000..8df29abb5 --- /dev/null +++ b/docs/doc_examples/178c920d5e8ec0071f77290fa059802c.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + refresh_interval: "1s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/179f0a3e84ff4bbac18787a018eabf89.asciidoc b/docs/doc_examples/179f0a3e84ff4bbac18787a018eabf89.asciidoc index 7229d6d0a..f6c66f74b 100644 --- 
a/docs/doc_examples/179f0a3e84ff4bbac18787a018eabf89.asciidoc +++ b/docs/doc_examples/179f0a3e84ff4bbac18787a018eabf89.asciidoc @@ -4,21 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'Jon', - type: 'cross_fields', - analyzer: 'standard', - fields: [ - 'first', - 'last', - '*.edge' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "Jon", + type: "cross_fields", + analyzer: "standard", + fields: ["first", "last", "*.edge"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/17a1e308761afd3282f13d44d7be008a.asciidoc b/docs/doc_examples/17a1e308761afd3282f13d44d7be008a.asciidoc new file mode 100644 index 000000000..5ca1e8e58 --- /dev/null +++ b/docs/doc_examples/17a1e308761afd3282f13d44d7be008a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + comment: { + type: "text", + term_vector: "with_positions_offsets", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/17b1647c8509543f2388c886f2584a20.asciidoc b/docs/doc_examples/17b1647c8509543f2388c886f2584a20.asciidoc new file mode 100644 index 000000000..e1f4fbd3c --- /dev/null +++ b/docs/doc_examples/17b1647c8509543f2388c886f2584a20.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + match: { + text: "How often does the moon hide the sun?", + }, + }, + }, + }, + field: "text", + inference_id: "elastic-rerank", + inference_text: "How often does the moon hide the sun?", + rank_window_size: 100, + min_score: 0.5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc b/docs/doc_examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc new file mode 100644 index 000000000..2f461ea4c --- /dev/null +++ b/docs/doc_examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "_description", + processors: [ + { + set: { + field: "field2", + value: "_value", + }, + }, + ], + }, + docs: [ + { + _index: "index", + _id: "id", + _source: { + foo: "bar", + }, + }, + { + _index: "index", + _id: "id", + _source: { + foo: "rab", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/17dd67a66c49f7eb618dd17430e48dfa.asciidoc b/docs/doc_examples/17dd67a66c49f7eb618dd17430e48dfa.asciidoc new file mode 100644 index 000000000..69f249727 --- /dev/null +++ b/docs/doc_examples/17dd67a66c49f7eb618dd17430e48dfa.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + constant_score: { + filter: { + range: { + my_date: { + gte: "now-1h/m", + lte: "now/m", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/17de0020b228df961ad3c6b06233c948.asciidoc b/docs/doc_examples/17de0020b228df961ad3c6b06233c948.asciidoc deleted file mode 100644 index b7cbc2f04..000000000 --- a/docs/doc_examples/17de0020b228df961ad3c6b06233c948.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'my_index', - body: { - properties: { - user_id: { - type: 'keyword', - ignore_above: 100 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/17e6f3fac556f08a78f7a876e71acb89.asciidoc b/docs/doc_examples/17e6f3fac556f08a78f7a876e71acb89.asciidoc new file mode 100644 index 000000000..8661cf147 --- /dev/null +++ b/docs/doc_examples/17e6f3fac556f08a78f7a876e71acb89.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "_all", + settings: { + "index.unassigned.node_left.delayed_timeout": "5m", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/17f8a8990b0166befa3bc2b10fd28134.asciidoc b/docs/doc_examples/17f8a8990b0166befa3bc2b10fd28134.asciidoc new file mode 100644 index 000000000..8c4710ec7 --- /dev/null +++ b/docs/doc_examples/17f8a8990b0166befa3bc2b10fd28134.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: "match_value", + document: { + query: { + match: { + field: "value", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/17fb298fb1e47f7d946a772d68f4e2df.asciidoc b/docs/doc_examples/17fb298fb1e47f7d946a772d68f4e2df.asciidoc new file mode 100644 index 000000000..064b8e0d0 --- /dev/null +++ b/docs/doc_examples/17fb298fb1e47f7d946a772d68f4e2df.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-data-stream", + query: { + match: { + "user.id": "vlb44hny", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/182df084f028479ecbe8d7648ddad892.asciidoc b/docs/doc_examples/182df084f028479ecbe8d7648ddad892.asciidoc new file mode 100644 index 000000000..aca2d7530 --- /dev/null +++ b/docs/doc_examples/182df084f028479ecbe8d7648ddad892.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.getStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/186a7143d50e8c3ee01094e1a9ff0c0c.asciidoc b/docs/doc_examples/186a7143d50e8c3ee01094e1a9ff0c0c.asciidoc new file mode 100644 index 000000000..1bf5255a3 --- /dev/null +++ b/docs/doc_examples/186a7143d50e8c3ee01094e1a9ff0c0c.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "passage_vectors", + mappings: { + properties: { + full_text: { + type: "text", + }, + creation_time: { + type: "date", + },
+ paragraph: { + type: "nested", + properties: { + vector: { + type: "dense_vector", + dims: 2, + index_options: { + type: "hnsw", + }, + }, + text: { + type: "text", + index: false, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/187733e50c60350f3f75921bea3b72c2.asciidoc b/docs/doc_examples/187733e50c60350f3f75921bea3b72c2.asciidoc new file mode 100644 index 000000000..803cd40e1 --- /dev/null +++ b/docs/doc_examples/187733e50c60350f3f75921bea3b72c2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + scroll: "1m", + slice: { + field: "@timestamp", + id: 0, + max: 10, + }, + query: { + match: { + message: "foo", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/187e8786e0a90f1f6278cf89b670de0a.asciidoc b/docs/doc_examples/187e8786e0a90f1f6278cf89b670de0a.asciidoc new file mode 100644 index 000000000..7351ff9ee --- /dev/null +++ b/docs/doc_examples/187e8786e0a90f1f6278cf89b670de0a.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "german_example", + settings: { + analysis: { + filter: { + german_stop: { + type: "stop", + stopwords: "_german_", + }, + german_keywords: { + type: "keyword_marker", + keywords: ["Beispiel"], + }, + german_stemmer: { + type: "stemmer", + language: "light_german", + }, + }, + analyzer: { + rebuilt_german: { + tokenizer: "standard", + filter: [ + "lowercase", + "german_stop", + "german_keywords", + "german_normalization", + "german_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/188e6208cccb13027a5c1c95440841ee.asciidoc b/docs/doc_examples/188e6208cccb13027a5c1c95440841ee.asciidoc new file mode 100644 index 000000000..8a3fcd943 --- /dev/null +++ b/docs/doc_examples/188e6208cccb13027a5c1c95440841ee.asciidoc @@ -0,0 +1,61 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "logs", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + body: "warning: page could not be rendered", + }, + { + index: { + _id: 2, + }, + }, + { + body: "authentication error", + }, + { + index: { + _id: 3, + }, + }, + { + body: "warning: connection timed out", + }, + ], +}); +console.log(response); + +const response1 = await client.search({ + index: "logs", + size: 0, + aggs: { + messages: { + filters: { + filters: { + errors: { + match: { + body: "error", + }, + }, + warnings: { + match: { + body: "warning", + }, + }, + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/189f0cd1ee2485cf11a2968f01d54e5b.asciidoc b/docs/doc_examples/189f0cd1ee2485cf11a2968f01d54e5b.asciidoc new file mode 100644 index 000000000..e085fe57c --- /dev/null +++ b/docs/doc_examples/189f0cd1ee2485cf11a2968f01d54e5b.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: 
"month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + sales_deriv: { + derivative: { + buckets_path: "sales", + unit: "day", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc b/docs/doc_examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc index 7172806a0..9d0141379 100644 --- a/docs/doc_examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc +++ b/docs/doc_examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.tasks.cancel({ - task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619' -}) -console.log(response) + task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/190a21e32db2125ddaea0f634e126a84.asciidoc b/docs/doc_examples/190a21e32db2125ddaea0f634e126a84.asciidoc new file mode 100644 index 000000000..f4a25c0ec --- /dev/null +++ b/docs/doc_examples/190a21e32db2125ddaea0f634e126a84.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clone({ + index: "my_source_index", + target: "my_target_index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/19174d872fd1e43cbfb7a96a33d13c96.asciidoc b/docs/doc_examples/19174d872fd1e43cbfb7a96a33d13c96.asciidoc new file mode 100644 index 000000000..b35d51a2c --- /dev/null +++ b/docs/doc_examples/19174d872fd1e43cbfb7a96a33d13c96.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "places", + mappings: { + properties: { + geometry: { + type: "shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "places", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + name: "NEMO Science Museum", + geometry: "POINT(491.2350 5237.4081)", + }, + { + index: { + _id: 2, + }, + }, + { + name: "Sportpark De Weeren", + geometry: { + type: "Polygon", + coordinates: [ + [ + [496.5305328369141, 5239.347642069457], + [496.6979026794433, 5239.172175893484], + [496.9425201416015, 5239.238958618537], + [496.7944622039794, 5239.420969150824], + [496.5305328369141, 5239.347642069457], + ], + ], + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "places", + size: 0, + aggs: { + centroid: { + cartesian_centroid: { + field: "geometry", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc b/docs/doc_examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc new file mode 100644 index 000000000..1df059288 --- /dev/null +++ b/docs/doc_examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.retry({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/193234bb5dc6451fd15b584fbefd2446.asciidoc b/docs/doc_examples/193234bb5dc6451fd15b584fbefd2446.asciidoc new file mode 100644 index 000000000..4adf954e1 --- /dev/null +++ b/docs/doc_examples/193234bb5dc6451fd15b584fbefd2446.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use 
`node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "example1", + indices: [ + { + names: ["my-index-000001"], + privileges: ["read"], + query: { + template: { + source: { + term: { + "acl.username": "{{_user.username}}", + }, + }, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/193704020a19714dec390452a4e75e8d.asciidoc b/docs/doc_examples/193704020a19714dec390452a4e75e8d.asciidoc new file mode 100644 index 000000000..1fcacfbd1 --- /dev/null +++ b/docs/doc_examples/193704020a19714dec390452a4e75e8d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "books", +}); +console.log(response); +---- diff --git a/docs/doc_examples/193d86b6cc34e12c2be806d27816a35c.asciidoc b/docs/doc_examples/193d86b6cc34e12c2be806d27816a35c.asciidoc new file mode 100644 index 000000000..3dae701d0 --- /dev/null +++ b/docs/doc_examples/193d86b6cc34e12c2be806d27816a35c.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + size: 5, + query_string: "mountain climbing", + text_fields: [ + { + name: "title", + boost: 10, + }, + { + name: "description", + boost: 2, + }, + { + name: "state", + boost: 1, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/194bbac15e709174ac85b681f3a3d137.asciidoc b/docs/doc_examples/194bbac15e709174ac85b681f3a3d137.asciidoc new file mode 100644 index 000000000..2015c6183 --- /dev/null +++ b/docs/doc_examples/194bbac15e709174ac85b681f3a3d137.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["template*"], + template: { + settings: { + number_of_shards: 1, + }, + aliases: { + alias1: {}, + alias2: { + filter: { + term: { + "user.id": "kimchy", + }, + }, + routing: "shard-1", + }, + "{index}-alias": {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/196aed02b11def364bab84e455c1a073.asciidoc b/docs/doc_examples/196aed02b11def364bab84e455c1a073.asciidoc new file mode 100644 index 000000000..4492ff082 --- /dev/null +++ b/docs/doc_examples/196aed02b11def364bab84e455c1a073.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["logs-*"], + data_stream: {}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/199f5165d876267080046c907e93483f.asciidoc b/docs/doc_examples/199f5165d876267080046c907e93483f.asciidoc new file mode 100644 index 000000000..d5e492b5a --- /dev/null +++ b/docs/doc_examples/199f5165d876267080046c907e93483f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "my-index-000001", + field: 
"my-field", + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc new file mode 100644 index 000000000..ab8b0fd13 --- /dev/null +++ b/docs/doc_examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.simulate.ingest({ + docs: [ + { + _index: "my-index", + _id: "123", + _source: { + foo: "bar", + }, + }, + { + _index: "my-index", + _id: "456", + _source: { + foo: "rab", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/19ee488226d357d1576e7d3ae7a4693f.asciidoc b/docs/doc_examples/19ee488226d357d1576e7d3ae7a4693f.asciidoc new file mode 100644 index 000000000..aa4955985 --- /dev/null +++ b/docs/doc_examples/19ee488226d357d1576e7d3ae7a4693f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "keyword", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1a1f3421717ff744ed83232729289bb0.asciidoc b/docs/doc_examples/1a1f3421717ff744ed83232729289bb0.asciidoc new file mode 100644 index 000000000..3aff383cc --- /dev/null +++ b/docs/doc_examples/1a1f3421717ff744ed83232729289bb0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.deleteLifecycle({ + policy_id: "daily-snapshots", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1a2890b90f3699fc2a4f27f94b145be9.asciidoc b/docs/doc_examples/1a2890b90f3699fc2a4f27f94b145be9.asciidoc new file mode 100644 index 000000000..f07d50ef1 --- /dev/null +++ b/docs/doc_examples/1a2890b90f3699fc2a4f27f94b145be9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "nightly-cluster-state-snapshots", + schedule: "0 30 2 * * ?", + name: "", + repository: "my_secure_repository", + config: { + include_global_state: true, + indices: "-*", + }, + retention: { + expire_after: "30d", + min_count: 5, + max_count: 50, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc b/docs/doc_examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc new file mode 100644 index 000000000..8ca806885 --- /dev/null +++ b/docs/doc_examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + level: "shards", + human: "true", + expand_wildcards: "all", + filter_path: "indices.*.total.indexing.index_total", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc b/docs/doc_examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc new file mode 100644 index 000000000..9ede96eef --- /dev/null +++ 
b/docs/doc_examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startDataFrameAnalytics({ + id: "loganalytics", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1a4f8beb6847678880ca113ee6fb75ca.asciidoc b/docs/doc_examples/1a4f8beb6847678880ca113ee6fb75ca.asciidoc new file mode 100644 index 000000000..cf2015322 --- /dev/null +++ b/docs/doc_examples/1a4f8beb6847678880ca113ee6fb75ca.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", + pretty: "true", + suggest: { + "song-suggest": { + regex: "n[ever|i]r", + completion: { + field: "suggest", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc b/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc new file mode 100644 index 000000000..46718769b --- /dev/null +++ b/docs/doc_examples/1a56df055b94466ca76818e0858752c6.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "openai_embeddings", + inference_config: { + service: "openai", + service_settings: { + api_key: "", + model_id: "text-embedding-ada-002", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1a6dbe5df488c4a16e2f1101ba8a25d9.asciidoc b/docs/doc_examples/1a6dbe5df488c4a16e2f1101ba8a25d9.asciidoc new file mode 100644 index 000000000..8a3b8bda4 --- /dev/null +++ b/docs/doc_examples/1a6dbe5df488c4a16e2f1101ba8a25d9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "pattern", + text: "The foo_bar_size's default is 5.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1a7483796087053ba55029d0dc2ab356.asciidoc b/docs/doc_examples/1a7483796087053ba55029d0dc2ab356.asciidoc new file mode 100644 index 000000000..a5696747c --- /dev/null +++ b/docs/doc_examples/1a7483796087053ba55029d0dc2ab356.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "mv", + refresh: "true", + document: { + a: [2, null, 1], + }, +}); +console.log(response); + +const response1 = await client.esql.query({ + query: "FROM mv | LIMIT 1", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1a81fe0186369838531e116e85aa4ccd.asciidoc b/docs/doc_examples/1a81fe0186369838531e116e85aa4ccd.asciidoc new file mode 100644 index 000000000..ec6bd5524 --- /dev/null +++ b/docs/doc_examples/1a81fe0186369838531e116e85aa4ccd.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "shirts", + mappings: { + properties: { + brand: { + type: "keyword", + }, + color: { + type: "keyword", + }, + model: { + type: 
"keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "shirts", + id: 1, + refresh: "true", + document: { + brand: "gucci", + color: "red", + model: "slim", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1a8d92e93481c432a91f7c213099800a.asciidoc b/docs/doc_examples/1a8d92e93481c432a91f7c213099800a.asciidoc new file mode 100644 index 000000000..6e87b1d33 --- /dev/null +++ b/docs/doc_examples/1a8d92e93481c432a91f7c213099800a.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryApiKeys(); +console.log(response); +---- diff --git a/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc b/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc new file mode 100644 index 000000000..06c636ead --- /dev/null +++ b/docs/doc_examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "google_vertex_ai_rerank", + inference_config: { + service: "googlevertexai", + service_settings: { + service_account_json: "", + project_id: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc b/docs/doc_examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc new file mode 100644 index 000000000..1d19dc630 --- /dev/null +++ b/docs/doc_examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.rollupSearch({ + index: "sensor-1,sensor_rollup", + size: 0, + aggregations: { + max_temperature: { + max: { + field: "temperature", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc b/docs/doc_examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc index 8881b660d..dec1346f8 100644 --- a/docs/doc_examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc +++ b/docs/doc_examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc @@ -4,40 +4,39 @@ [source, js] ---- const response = await client.bulk({ - body: [ + operations: [ { update: { - _id: '5', - _index: 'index1' - } + _id: "5", + _index: "index1", + }, }, { doc: { - my_field: 'foo' - } + my_field: "foo", + }, }, { update: { - _id: '6', - _index: 'index1' - } + _id: "6", + _index: "index1", + }, }, { doc: { - my_field: 'foo' - } + my_field: "foo", + }, }, { create: { - _id: '7', - _index: 'index1' - } + _id: "7", + _index: "index1", + }, }, { - my_field: 'foo' - } - ] -}) -console.log(response) + my_field: "foo", + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc b/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc new file mode 100644 index 000000000..84fdefbae --- /dev/null +++ b/docs/doc_examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000002", + mappings: { + properties: { + metrics: { + 
subobjects: false, + properties: { + time: { + type: "object", + properties: { + min: { + type: "long", + }, + max: { + type: "long", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.getMapping({ + index: "my-index-000002", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1adee74383e5594e45c937177d75aa2a.asciidoc b/docs/doc_examples/1adee74383e5594e45c937177d75aa2a.asciidoc new file mode 100644 index 000000000..a3408bfb4 --- /dev/null +++ b/docs/doc_examples/1adee74383e5594e45c937177d75aa2a.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_index", + query: { + match_all: {}, + }, + sort: { + my_counter: "desc", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc b/docs/doc_examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc new file mode 100644 index 000000000..49d0b45e5 --- /dev/null +++ b/docs/doc_examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-restricted-api-key", + role_descriptors: { + "my-restricted-role-descriptor": { + indices: [ + { + names: ["my-search-app"], + privileges: ["read"], + }, + ], + restriction: { + workflows: ["search_application_query"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b0b29e5cd7550c648d0892378e93804.asciidoc b/docs/doc_examples/1b0b29e5cd7550c648d0892378e93804.asciidoc new file mode 100644 index 000000000..d8d5f1e85 --- /dev/null +++ b/docs/doc_examples/1b0b29e5cd7550c648d0892378e93804.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteCalendarJob({ + calendar_id: "planned-outages", + job_id: "total-requests", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc b/docs/doc_examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc new file mode 100644 index 000000000..e490ad820 --- /dev/null +++ b/docs/doc_examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.ackWatch({ + watch_id: "my_watch", + action_id: "test_index", +}); +console.log(response); + +const response1 = await client.watcher.getWatch({ + id: "my_watch", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1b0f40959a7a4d124372f2bd3f7eac85.asciidoc b/docs/doc_examples/1b0f40959a7a4d124372f2bd3f7eac85.asciidoc new file mode 100644 index 000000000..fd884d183 --- /dev/null +++ b/docs/doc_examples/1b0f40959a7a4d124372f2bd3f7eac85.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "custom_fingerprint_example", + settings: { + analysis: { + analyzer: { + whitespace_: { + tokenizer: "whitespace", + filter: ["fingerprint_plus_concat"], + }, + }, 
+ filter: { + fingerprint_plus_concat: { + type: "fingerprint", + max_output_size: 100, + separator: "+", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc b/docs/doc_examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc new file mode 100644 index 000000000..1e7e3f102 --- /dev/null +++ b/docs/doc_examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putCalendarJob({ + calendar_id: "planned-outages", + job_id: "total-requests", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b3762712c14a19e8c2956b4f530d327.asciidoc b/docs/doc_examples/1b3762712c14a19e8c2956b4f530d327.asciidoc new file mode 100644 index 000000000..b790c8e30 --- /dev/null +++ b/docs/doc_examples/1b3762712c14a19e8c2956b4f530d327.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.follow({ + index: "follower_index", + wait_for_active_shards: 1, + remote_cluster: "remote_cluster", + leader_index: "leader_index", + settings: { + "index.number_of_replicas": 0, + }, + max_read_request_operation_count: 1024, + max_outstanding_read_requests: 16, + max_read_request_size: "1024k", + max_write_request_operation_count: 32768, + max_write_request_size: "16k", + max_outstanding_write_requests: 8, + max_write_buffer_count: 512, + max_write_buffer_size: "512k", + max_retry_delay: "10s", + read_poll_timeout: "30s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc b/docs/doc_examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc new file mode 100644 index 000000000..908e50f66 --- /dev/null +++ b/docs/doc_examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "shrink-index", + policy: { + phases: { + warm: { + min_age: "5d", + actions: { + shrink: { + number_of_shards: 4, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b47d988b218ee595430ec91eba91d80.asciidoc b/docs/doc_examples/1b47d988b218ee595430ec91eba91d80.asciidoc new file mode 100644 index 000000000..d139c002b --- /dev/null +++ b/docs/doc_examples/1b47d988b218ee595430ec91eba91d80.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "logs-foo", + index_patterns: ["logs-foo-*"], + data_stream: {}, + composed_of: ["logs-foo_component1", "logs-foo_component2"], + ignore_missing_component_templates: ["logs-foo_component2"], + priority: 500, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b542e3ea87a742f95641d64dcfb1bdb.asciidoc b/docs/doc_examples/1b542e3ea87a742f95641d64dcfb1bdb.asciidoc deleted file mode 100644 index 29dddce33..000000000 --- a/docs/doc_examples/1b542e3ea87a742f95641d64dcfb1bdb.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples - -[source, js] ----- -const response = await client.count({ - index: 'twitter', - q: 'user:kimchy' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1b5c8d6e61930a308008b5b1ace2aa07.asciidoc b/docs/doc_examples/1b5c8d6e61930a308008b5b1ace2aa07.asciidoc new file mode 100644 index 000000000..fcb26b045 --- /dev/null +++ b/docs/doc_examples/1b5c8d6e61930a308008b5b1ace2aa07.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "manager.name": "Alice White", + }, + }, + aggs: { + Employees: { + nested: { + path: "employees", + }, + aggs: { + "Employee Ages": { + histogram: { + field: "employees.age", + interval: 5, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1b8655e6ba99fe39933c6eafe78728b7.asciidoc b/docs/doc_examples/1b8655e6ba99fe39933c6eafe78728b7.asciidoc deleted file mode 100644 index 8901d4edc..000000000 --- a/docs/doc_examples/1b8655e6ba99fe39933c6eafe78728b7.asciidoc +++ /dev/null @@ -1,38 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.reindex({ - body: { - source: { - index: 'twitter', - slice: { - id: 0, - max: 2 - } - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response0) - -const response1 = await client.reindex({ - body: { - source: { - index: 'twitter', - slice: { - id: 1, - max: 2 - } - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/1b8caf0a6741126c6d0ad83b56fce290.asciidoc b/docs/doc_examples/1b8caf0a6741126c6d0ad83b56fce290.asciidoc deleted file mode 100644 index 1eb89aaca..000000000 --- a/docs/doc_examples/1b8caf0a6741126c6d0ad83b56fce290.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putTemplate({ - name: 'template_1', - body: { - index_patterns: [ - 'te*' - ], - settings: { - number_of_shards: 1 - }, - aliases: { - alias1: {}, - alias2: { - filter: { - term: { - user: 'kimchy' - } - }, - routing: 'kimchy' - }, - '{index}-alias': {} - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1b98b60d8e558fcccf9c550bdbf5b5c9.asciidoc b/docs/doc_examples/1b98b60d8e558fcccf9c550bdbf5b5c9.asciidoc new file mode 100644 index 000000000..8e56b7f06 --- /dev/null +++ b/docs/doc_examples/1b98b60d8e558fcccf9c550bdbf5b5c9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "example3", + indices: [ + { + names: ["my-index-000001"], + privileges: ["read"], + query: { + template: { + source: + '{ "terms": { "group.statuses": {{#toJson}}_user.metadata.statuses{{/toJson}} }}', + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc b/docs/doc_examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc new file mode 100644 index 000000000..44005a3f3 --- /dev/null +++ b/docs/doc_examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO 
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "romanian_example", + settings: { + analysis: { + filter: { + romanian_stop: { + type: "stop", + stopwords: "_romanian_", + }, + romanian_keywords: { + type: "keyword_marker", + keywords: ["exemplu"], + }, + romanian_stemmer: { + type: "stemmer", + language: "romanian", + }, + }, + analyzer: { + rebuilt_romanian: { + tokenizer: "standard", + filter: [ + "lowercase", + "romanian_stop", + "romanian_keywords", + "romanian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1bc731a4df952228af6dfa6b48627332.asciidoc b/docs/doc_examples/1bc731a4df952228af6dfa6b48627332.asciidoc deleted file mode 100644 index 0e51e68ad..000000000 --- a/docs/doc_examples/1bc731a4df952228af6dfa6b48627332.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - max_docs: 10, - source: { - index: 'twitter', - query: { - function_score: { - random_score: {}, - min_score: 0.9 - } - } - }, - dest: { - index: 'random_twitter' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1bceb160ed2bcd51ee040caf21acf780.asciidoc b/docs/doc_examples/1bceb160ed2bcd51ee040caf21acf780.asciidoc new file mode 100644 index 000000000..25474a42e --- /dev/null +++ b/docs/doc_examples/1bceb160ed2bcd51ee040caf21acf780.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my-search-app", + search_application: { + indices: ["index1"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "retriever": {\n "rrf": {\n "retrievers": [\n {{#text_fields}}\n {\n "standard": {\n "query": {\n "match": {\n "{{.}}": "{{query_string}}"\n }\n }\n }\n },\n {{/text_fields}}\n {{#elser_fields}}\n {\n "standard": {\n "query": {\n "sparse_vector": {\n "field": "ml.inference.{{.}}_expanded.predicted_value",\n "inference_id": "",\n "query": "{{query_string}}"\n }\n }\n }\n },\n {{/elser_fields}}\n ],\n "rank_window_size": {{rrf.rank_window_size}},\n "rank_constant": {{rrf.rank_constant}}\n }\n }\n }\n ', + params: { + elser_fields: ["title", "meta_description"], + text_fields: ["title", "meta_description"], + query_string: "", + rrf: { + rank_window_size: 100, + rank_constant: 60, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1c142bc8cac8d9dcb4f60e22902d434f.asciidoc b/docs/doc_examples/1c142bc8cac8d9dcb4f60e22902d434f.asciidoc new file mode 100644 index 000000000..76ee6237d --- /dev/null +++ b/docs/doc_examples/1c142bc8cac8d9dcb4f60e22902d434f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + message_stats: { + string_stats: { + field: "message.keyword", + show_distribution: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1c1f2a6a193d9e64c37242b2824b3031.asciidoc b/docs/doc_examples/1c1f2a6a193d9e64c37242b2824b3031.asciidoc new file mode 100644 index 000000000..e9198e540 
--- /dev/null +++ b/docs/doc_examples/1c1f2a6a193d9e64c37242b2824b3031.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "source_template", + template: { + settings: { + index: { + number_of_replicas: 2, + number_of_shards: 2, + mode: "time_series", + routing_path: ["metricset"], + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + metricset: { + type: "keyword", + time_series_dimension: true, + }, + k8s: { + properties: { + tx: { + type: "long", + }, + rx: { + type: "long", + }, + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: 1, + index_patterns: ["k8s*"], + composed_of: ["source_template"], + data_stream: {}, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1c23507edd7a3c18538b68223378e4ab.asciidoc b/docs/doc_examples/1c23507edd7a3c18538b68223378e4ab.asciidoc deleted file mode 100644 index eacb07fb1..000000000 --- a/docs/doc_examples/1c23507edd7a3c18538b68223378e4ab.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'twitter' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc b/docs/doc_examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc new file mode 100644 index 000000000..0c57e6552 --- /dev/null +++ b/docs/doc_examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "hugging_face_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "hugging_face_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc b/docs/doc_examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc new file mode 100644 index 000000000..c7c693460 --- /dev/null +++ b/docs/doc_examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: { + source: "ctx._source.tags.add(params['tag'])", + lang: "painless", + params: { + tag: "blue", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1c87b5bf682bc1e8809a657529e14b07.asciidoc b/docs/doc_examples/1c87b5bf682bc1e8809a657529e14b07.asciidoc new file mode 100644 index 000000000..f7d5b04ed --- /dev/null +++ b/docs/doc_examples/1c87b5bf682bc1e8809a657529e14b07.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "shapes", + mappings: { + properties: { + location: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "shapes", + id: "deu", + document: { + 
location: { + type: "envelope", + coordinates: [ + [13, 53], + [14, 52], + ], + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "example", + query: { + bool: { + filter: { + geo_shape: { + location: { + indexed_shape: { + index: "shapes", + id: "deu", + path: "location", + }, + }, + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc b/docs/doc_examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc new file mode 100644 index 000000000..f7132ca37 --- /dev/null +++ b/docs/doc_examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteDataFrameAnalytics({ + id: "loganalytics", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc b/docs/doc_examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc new file mode 100644 index 000000000..b563e314b --- /dev/null +++ b/docs/doc_examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "music", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cab9da122778a95061831265c250cc1.asciidoc b/docs/doc_examples/1cab9da122778a95061831265c250cc1.asciidoc new file mode 100644 index 000000000..2feadffb8 --- /dev/null +++ b/docs/doc_examples/1cab9da122778a95061831265c250cc1.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + tags: { + type: "keyword", + script: + "\n emit(doc['type'].value);\n if (doc['promoted'].value) {\n emit('hot');\n }\n ", + }, + }, + aggs: { + tags_count: { + value_count: { + field: "tags", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cadbcf2cfeb312f73b7f098291356ac.asciidoc b/docs/doc_examples/1cadbcf2cfeb312f73b7f098291356ac.asciidoc new file mode 100644 index 000000000..591ffd22e --- /dev/null +++ b/docs/doc_examples/1cadbcf2cfeb312f73b7f098291356ac.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "MULTIPOINT (102.0 2.0, 103.0 2.0)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cb3b45335ab1b9697c358104d44ea39.asciidoc b/docs/doc_examples/1cb3b45335ab1b9697c358104d44ea39.asciidoc new file mode 100644 index 000000000..90dc0b24f --- /dev/null +++ b/docs/doc_examples/1cb3b45335ab1b9697c358104d44ea39.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.security.transport.filter.enabled": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc b/docs/doc_examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc new file mode 100644 index 
000000000..91636c2e3 --- /dev/null +++ b/docs/doc_examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "google_vertex_ai_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "google_vertex_ai_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cca4bb2f0ea7e43181be8bd965149d4.asciidoc b/docs/doc_examples/1cca4bb2f0ea7e43181be8bd965149d4.asciidoc new file mode 100644 index 000000000..6703e293f --- /dev/null +++ b/docs/doc_examples/1cca4bb2f0ea7e43181be8bd965149d4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.get({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + wait_for_completion_timeout: "2s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cd3b9d65576a9212eef898eb3105758.asciidoc b/docs/doc_examples/1cd3b9d65576a9212eef898eb3105758.asciidoc new file mode 100644 index 000000000..3aaca987e --- /dev/null +++ b/docs/doc_examples/1cd3b9d65576a9212eef898eb3105758.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.enable": "primaries", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cea60c47d5c0e150b4c8fff4cd75ffe.asciidoc b/docs/doc_examples/1cea60c47d5c0e150b4c8fff4cd75ffe.asciidoc new file mode 100644 index 000000000..b73c6cce1 --- /dev/null +++ b/docs/doc_examples/1cea60c47d5c0e150b4c8fff4cd75ffe.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + script: { + description: "Set index based on `lang` field and `dataset` param", + lang: "painless", + source: + "\n ctx['_index'] = ctx['lang'] + '-' + params['dataset'];\n ", + params: { + dataset: "catalog", + }, + }, + }, + ], + }, + docs: [ + { + _index: "generic-index", + _source: { + lang: "fr", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ceaa211756e2db3d48c6bc4b1a861b0.asciidoc b/docs/doc_examples/1ceaa211756e2db3d48c6bc4b1a861b0.asciidoc new file mode 100644 index 000000000..c7e7c914b --- /dev/null +++ b/docs/doc_examples/1ceaa211756e2db3d48c6bc4b1a861b0.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-index*", + max_samples_per_key: 2, + size: 20, + query: + "\n sample\n [any where uptime > 0] by host,os\n [any where port > 100] by host,op_sys\n [any where bool == true] by host,os\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cecd4d87a92427175157d41859df2af.asciidoc b/docs/doc_examples/1cecd4d87a92427175157d41859df2af.asciidoc new file mode 100644 index 000000000..c49cfc24d --- /dev/null +++ 
b/docs/doc_examples/1cecd4d87a92427175157d41859df2af.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + index: "my-index-000001", + shard: 0, + primary: false, + current_node: "my-node", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1cfa04e9654c1484e3d4c75bf439400a.asciidoc b/docs/doc_examples/1cfa04e9654c1484e3d4c75bf439400a.asciidoc new file mode 100644 index 000000000..2990d47a9 --- /dev/null +++ b/docs/doc_examples/1cfa04e9654c1484e3d4c75bf439400a.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "polygon", + coordinates: [ + [ + [1000, -1001], + [1001, -1001], + [1001, -1000], + [1000, -1000], + [1000, -1001], + ], + [ + [1000.2, -1000.8], + [1000.8, -1000.8], + [1000.8, -1000.2], + [1000.2, -1000.2], + [1000.2, -1000.8], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc b/docs/doc_examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc new file mode 100644 index 000000000..0963fa75c --- /dev/null +++ b/docs/doc_examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc @@ -0,0 +1,71 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryApiKeys({ + size: 0, + query: { + bool: { + must: { + term: { + invalidated: false, + }, + }, + should: [ + { + range: { + expiration: { + gte: "now", + }, + }, + }, + { + bool: { + must_not: { + exists: { + field: "expiration", + }, + }, + }, + }, + ], + minimum_should_match: 1, + }, + }, + aggs: { + keys_by_username: { + composite: { + sources: [ + { + usernames: { + terms: { + field: "username", + }, + }, + }, + ], + }, + aggs: { + expires_soon: { + filter: { + range: { + expiration: { + lte: "now+30d/d", + }, + }, + }, + aggs: { + key_names: { + terms: { + field: "name", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1d65cb6d055c46a1bde809687d835b71.asciidoc b/docs/doc_examples/1d65cb6d055c46a1bde809687d835b71.asciidoc deleted file mode 100644 index 122c24c08..000000000 --- a/docs/doc_examples/1d65cb6d055c46a1bde809687d835b71.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'twitter', - id: '2', - routing: 'user1' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1d746272a7511bf91302a15b5c58ca0e.asciidoc b/docs/doc_examples/1d746272a7511bf91302a15b5c58ca0e.asciidoc new file mode 100644 index 000000000..0e8581267 --- /dev/null +++ b/docs/doc_examples/1d746272a7511bf91302a15b5c58ca0e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "passage_vectors", + fields: ["full_text", "creation_time"], + _source: false, + knn: { + query_vector: [0.45, 45], + field: "paragraph.vector", + k: 2, + num_candidates: 2, + }, +});
+console.log(response); +---- diff --git a/docs/doc_examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc b/docs/doc_examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc new file mode 100644 index 000000000..4a91620c5 --- /dev/null +++ b/docs/doc_examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "pre-dsl-ilm-policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + }, + }, + delete: { + min_age: "7d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1da77e114459e0b77d78a3dcc8fae429.asciidoc b/docs/doc_examples/1da77e114459e0b77d78a3dcc8fae429.asciidoc deleted file mode 100644 index 69394b727..000000000 --- a/docs/doc_examples/1da77e114459e0b77d78a3dcc8fae429.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'twitter-1' -}) -console.log(response0) - -const response1 = await client.indices.create({ - index: 'twitter-2' -}) -console.log(response1) - -const response2 = await client.indices.putMapping({ - index: 'twitter-1,twitter-2', - body: { - properties: { - user_name: { - type: 'text' - } - } - } -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc b/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc new file mode 100644 index 000000000..a38e95486 --- /dev/null +++ b/docs/doc_examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "azure_ai_studio_embeddings", + inference_config: { + service: "azureaistudio", + service_settings: { + api_key: "", + target: "", + provider: "", + endpoint_type: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1db086021e83205b6eab3b7765911cc2.asciidoc b/docs/doc_examples/1db086021e83205b6eab3b7765911cc2.asciidoc new file mode 100644 index 000000000..aa9237175 --- /dev/null +++ b/docs/doc_examples/1db086021e83205b6eab3b7765911cc2.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "parent_example", + mappings: { + properties: { + join: { + type: "join", + relations: { + question: "answer", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1db715eb00832686ecddb6603684fc26.asciidoc b/docs/doc_examples/1db715eb00832686ecddb6603684fc26.asciidoc new file mode 100644 index 000000000..d1849d8d6 --- /dev/null +++ b/docs/doc_examples/1db715eb00832686ecddb6603684fc26.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.enrollKibana(); +console.log(response); +---- diff --git a/docs/doc_examples/1dbb8cf17fbc45c87c7d2f75f15f9778.asciidoc 
b/docs/doc_examples/1dbb8cf17fbc45c87c7d2f75f15f9778.asciidoc new file mode 100644 index 000000000..c1a1fab23 --- /dev/null +++ b/docs/doc_examples/1dbb8cf17fbc45c87c7d2f75f15f9778.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + filter_path: "metadata.indices.*.stat*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e08e054c761353f99211cd18e8ca47b.asciidoc b/docs/doc_examples/1e08e054c761353f99211cd18e8ca47b.asciidoc new file mode 100644 index 000000000..5f1410788 --- /dev/null +++ b/docs/doc_examples/1e08e054c761353f99211cd18e8ca47b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteModelSnapshot({ + job_id: "farequote", + snapshot_id: 1491948163, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc b/docs/doc_examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc new file mode 100644 index 000000000..5afe39c96 --- /dev/null +++ b/docs/doc_examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index-000001", + only_expunge_deletes: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e18a67caf8f06ff2710ec4a8b30f625.asciidoc b/docs/doc_examples/1e18a67caf8f06ff2710ec4a8b30f625.asciidoc new file mode 100644 index 000000000..3e50dd22f --- /dev/null +++ b/docs/doc_examples/1e18a67caf8f06ff2710ec4a8b30f625.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + filter_path: "metadata.indices.*.state,-metadata.indices.logstash-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc b/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc new file mode 100644 index 000000000..6e8f3033b --- /dev/null +++ b/docs/doc_examples/1e26353d546d733634187b8c3a7837a7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.list({ + service_type: "sharepoint_online", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e2c5cef7a3f254c71a33865eb4d7569.asciidoc b/docs/doc_examples/1e2c5cef7a3f254c71a33865eb4d7569.asciidoc new file mode 100644 index 000000000..12ec8e928 --- /dev/null +++ b/docs/doc_examples/1e2c5cef7a3f254c71a33865eb4d7569.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "items", + query: { + bool: { + must: { + match: { + name: "chocolate", + }, + }, + should: { + distance_feature: { + field: "production_date", + pivot: "7d", + origin: "now", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc 
b/docs/doc_examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc new file mode 100644 index 000000000..702ce3c67 --- /dev/null +++ b/docs/doc_examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.segments({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e3553a73da487017f7a95088b6aa957.asciidoc b/docs/doc_examples/1e3553a73da487017f7a95088b6aa957.asciidoc new file mode 100644 index 000000000..d142fea11 --- /dev/null +++ b/docs/doc_examples/1e3553a73da487017f7a95088b6aa957.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRoles({ + name: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e49eba5b9042c1900a608fe5105ba43.asciidoc b/docs/doc_examples/1e49eba5b9042c1900a608fe5105ba43.asciidoc deleted file mode 100644 index 81a58db85..000000000 --- a/docs/doc_examples/1e49eba5b9042c1900a608fe5105ba43.asciidoc +++ /dev/null @@ -1,42 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.deleteByQuery({ - index: 'twitter', - body: { - slice: { - id: 0, - max: 2 - }, - query: { - range: { - likes: { - lt: 10 - } - } - } - } -}) -console.log(response0) - -const response1 = await client.deleteByQuery({ - index: 'twitter', - body: { - slice: { - id: 1, - max: 2 - }, - query: { - range: { - likes: { - lt: 10 - } - } - } - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/1e4b17b830ead15087ccd96151a5ebde.asciidoc b/docs/doc_examples/1e4b17b830ead15087ccd96151a5ebde.asciidoc new file mode 100644 index 000000000..bb1d3322e --- /dev/null +++ b/docs/doc_examples/1e4b17b830ead15087ccd96151a5ebde.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + runtime_mappings: { + message_and_context: { + type: "keyword", + script: + "\n emit(doc['message.keyword'].value + ' ' + doc['context.keyword'].value)\n ", + }, + }, + aggs: { + message_stats: { + string_stats: { + field: "message_and_context", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e547696f54582840040b1aa6661760c.asciidoc b/docs/doc_examples/1e547696f54582840040b1aa6661760c.asciidoc new file mode 100644 index 000000000..4d6375dbb --- /dev/null +++ b/docs/doc_examples/1e547696f54582840040b1aa6661760c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e871f060dbe1a5c316ed205278804a8.asciidoc b/docs/doc_examples/1e871f060dbe1a5c316ed205278804a8.asciidoc new file mode 100644 index 000000000..25aa22960 --- /dev/null +++ b/docs/doc_examples/1e871f060dbe1a5c316ed205278804a8.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + countries: { + terms: { + field: "artist.country", + order: { + "rock>playback_stats.avg": "desc", + }, + }, + aggs: { + rock: { + filter: { + term: { + genre: "rock", + }, + }, + aggs: { + playback_stats: { + stats: { + field: "play_count", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e94a2bb95bc245bcfb87ac7d611cf49.asciidoc b/docs/doc_examples/1e94a2bb95bc245bcfb87ac7d611cf49.asciidoc new file mode 100644 index 000000000..218edbd2c --- /dev/null +++ b/docs/doc_examples/1e94a2bb95bc245bcfb87ac7d611cf49.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + tdigest: { + execution_hint: "high_accuracy", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1e9cab0b2727624e22e8cf4e7ca498ac.asciidoc b/docs/doc_examples/1e9cab0b2727624e22e8cf4e7ca498ac.asciidoc new file mode 100644 index 000000000..acfb94c3a --- /dev/null +++ b/docs/doc_examples/1e9cab0b2727624e22e8cf4e7ca498ac.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.health({ + pretty: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ea24f67fbbb6293d53caf2fe0c4b984.asciidoc b/docs/doc_examples/1ea24f67fbbb6293d53caf2fe0c4b984.asciidoc new file mode 100644 index 000000000..70528bb7e --- /dev/null +++ b/docs/doc_examples/1ea24f67fbbb6293d53caf2fe0c4b984.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "simple", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc b/docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc new file mode 100644 index 000000000..347f3152e --- /dev/null +++ b/docs/doc_examples/1ead35c954963e83f89872048dabdbe9.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryRole({ + query: { + bool: { + must_not: { + term: { + "metadata._reserved": true, + }, + }, + }, + }, + sort: ["name"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/1eb9c6ecb827ca69f7b17f7d2a26eae9.asciidoc b/docs/doc_examples/1eb9c6ecb827ca69f7b17f7d2a26eae9.asciidoc new file mode 100644 index 000000000..979a1f33d --- /dev/null +++ b/docs/doc_examples/1eb9c6ecb827ca69f7b17f7d2a26eae9.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + term: { + "url.full": "{{#url}}{{host}}/{{page}}{{/url}}", + }, + }, + }, + params: { + host: "http://example.com/", + page: "hello-world", + }, +}); +console.log(response); +---- diff --git
a/docs/doc_examples/1ec66f188f681598cb5d7df700b214e3.asciidoc b/docs/doc_examples/1ec66f188f681598cb5d7df700b214e3.asciidoc new file mode 100644 index 000000000..e44de075a --- /dev/null +++ b/docs/doc_examples/1ec66f188f681598cb5d7df700b214e3.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_analyzer: { + type: "custom", + tokenizer: "standard", + filter: ["my_custom_keyword_marker_filter", "porter_stem"], + }, + }, + filter: { + my_custom_keyword_marker_filter: { + type: "keyword_marker", + keywords_path: "analysis/example_word_list.txt", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc b/docs/doc_examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc new file mode 100644 index 000000000..68fc1d5ff --- /dev/null +++ b/docs/doc_examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.deleteAsync({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ed77bf308fa4ab328b36060e412f500.asciidoc b/docs/doc_examples/1ed77bf308fa4ab328b36060e412f500.asciidoc new file mode 100644 index 000000000..bba203b58 --- /dev/null +++ b/docs/doc_examples/1ed77bf308fa4ab328b36060e412f500.asciidoc @@ -0,0 +1,66 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + network: { + properties: { + name: { + type: "keyword", + }, + }, + }, + latency_histo: { + type: "histogram", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [1, 3, 8, 12, 15], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [1, 6, 8, 12, 14], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + aggs: { + latency_buckets: { + histogram: { + field: "latency_histo", + interval: 5, + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/1eea46b08610972b79fdc4649748455d.asciidoc b/docs/doc_examples/1eea46b08610972b79fdc4649748455d.asciidoc new file mode 100644 index 000000000..06268c2dd --- /dev/null +++ b/docs/doc_examples/1eea46b08610972b79fdc4649748455d.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: + "cosineSimilarity(params.query_vector, 'my_dense_vector') + 1.0", + 
params: { + query_vector: [4, 3.4, -0.2], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc b/docs/doc_examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc new file mode 100644 index 000000000..3a1fca850 --- /dev/null +++ b/docs/doc_examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + sort: [ + { + post_date: { + format: "strict_date_optional_time_nanos", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f00e73c144603e97f6c14ab15fa1913.asciidoc b/docs/doc_examples/1f00e73c144603e97f6c14ab15fa1913.asciidoc new file mode 100644 index 000000000..09187a87e --- /dev/null +++ b/docs/doc_examples/1f00e73c144603e97f6c14ab15fa1913.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "greek_example", + settings: { + analysis: { + filter: { + greek_stop: { + type: "stop", + stopwords: "_greek_", + }, + greek_lowercase: { + type: "lowercase", + language: "greek", + }, + greek_keywords: { + type: "keyword_marker", + keywords: ["παράδειγμα"], + }, + greek_stemmer: { + type: "stemmer", + language: "greek", + }, + }, + analyzer: { + rebuilt_greek: { + tokenizer: "standard", + filter: [ + "greek_lowercase", + "greek_stop", + "greek_keywords", + "greek_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f13c7caef9c2fe0f73fce8795bbc9b0.asciidoc b/docs/doc_examples/1f13c7caef9c2fe0f73fce8795bbc9b0.asciidoc new file mode 100644 index 000000000..303c74cb4 --- /dev/null +++ b/docs/doc_examples/1f13c7caef9c2fe0f73fce8795bbc9b0.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + std_folded: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "asciifolding"], + }, + }, + }, + }, + mappings: { + properties: { + my_text: { + type: "text", + analyzer: "std_folded", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "std_folded", + text: "Is this déjà vu?", +}); +console.log(response1); + +const response2 = await client.indices.analyze({ + index: "my-index-000001", + field: "my_text", + text: "Is this déjà vu?", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/1f336ecc62480c1d56351cc2f82d0d08.asciidoc b/docs/doc_examples/1f336ecc62480c1d56351cc2f82d0d08.asciidoc deleted file mode 100644 index 0de94149b..000000000 --- a/docs/doc_examples/1f336ecc62480c1d56351cc2f82d0d08.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '1', - version: '2', - version_type: 'external', - body: { - message: 'elasticsearch now has versioning support, double cool!' 
- } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc b/docs/doc_examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc new file mode 100644 index 000000000..93dc5492a --- /dev/null +++ b/docs/doc_examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.create({ + repository: "my_repository", + snapshot: "my_snapshot", +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f507659757e2844cefced25848540a0.asciidoc b/docs/doc_examples/1f507659757e2844cefced25848540a0.asciidoc new file mode 100644 index 000000000..2ff2c4ff7 --- /dev/null +++ b/docs/doc_examples/1f507659757e2844cefced25848540a0.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "12km", + "pin.location": [-70, 40], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f673e1a0de2970dc648618d5425a994.asciidoc b/docs/doc_examples/1f673e1a0de2970dc648618d5425a994.asciidoc new file mode 100644 index 000000000..ee01d35bf --- /dev/null +++ b/docs/doc_examples/1f673e1a0de2970dc648618d5425a994.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh(); +console.log(response); + +const response1 = await client.search({ + index: "my-new-index-000001", + size: 0, + filter_path: "hits.total", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc b/docs/doc_examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc new file mode 100644 index 000000000..d58aa464e --- /dev/null +++ b/docs/doc_examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + rewrite: "true", + all_shards: "true", + query: { + match: { + "user.id": { + query: "kimchy", + fuzziness: "auto", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f6fe6833686e38c3711c6f2aa00a078.asciidoc b/docs/doc_examples/1f6fe6833686e38c3711c6f2aa00a078.asciidoc deleted file mode 100644 index 8d1852655..000000000 --- a/docs/doc_examples/1f6fe6833686e38c3711c6f2aa00a078.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - user_id: { - type: 'keyword', - ignore_above: 20 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/1f8a6d2cc57ed8997a52354aca371aac.asciidoc b/docs/doc_examples/1f8a6d2cc57ed8997a52354aca371aac.asciidoc new file mode 100644 index 000000000..65d03d532 --- /dev/null +++ b/docs/doc_examples/1f8a6d2cc57ed8997a52354aca371aac.asciidoc @@ -0,0 +1,26 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "direct_pki_only", + roles: ["role_for_pki1_direct"], + rules: { + all: [ + { + field: { + "realm.name": "pki1", + }, + }, + { + field: { + "metadata.pki_delegated_by_user": null, + }, + }, + ], + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1f900f7178e80051e75d4fd04467cf49.asciidoc b/docs/doc_examples/1f900f7178e80051e75d4fd04467cf49.asciidoc new file mode 100644 index 000000000..79c533e29 --- /dev/null +++ b/docs/doc_examples/1f900f7178e80051e75d4fd04467cf49.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + pipeline: "pipelineB", + document: { + field: "value", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc b/docs/doc_examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc new file mode 100644 index 000000000..aced9c9d6 --- /dev/null +++ b/docs/doc_examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "john-api-key", + expiration: "1d", + role_descriptors: { + "sharepoint-online-role": { + index: [ + { + names: ["sharepoint-search-application"], + privileges: ["read"], + query: { + template: { + params: { + access_control: ["john@example.co", "Engineering Members"], + }, + source: + '\n {\n "bool": {\n "should": [\n {\n "bool": {\n "must_not": {\n "exists": {\n "field": "_allow_access_control"\n }\n }\n }\n },\n {\n "terms": {\n "_allow_access_control.enum": {{#toJson}}access_control{{/toJson}}\n }\n }\n ]\n }\n }\n ', + }, + }, + }, + ], + restriction: { + workflows: ["search_application_query"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1fddbd602a6acf896a393cdb500a2831.asciidoc b/docs/doc_examples/1fddbd602a6acf896a393cdb500a2831.asciidoc new file mode 100644 index 000000000..79f5ecce5 --- /dev/null +++ b/docs/doc_examples/1fddbd602a6acf896a393cdb500a2831.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + by_date: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + avg_number_of_sales_per_year: { + rate: { + field: "price", + unit: "year", + mode: "value_count", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1fe2ed1d65c4774755de44c9b9d6ed67.asciidoc b/docs/doc_examples/1fe2ed1d65c4774755de44c9b9d6ed67.asciidoc new file mode 100644 index 000000000..674ca8a30 --- /dev/null +++ b/docs/doc_examples/1fe2ed1d65c4774755de44c9b9d6ed67.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "ingest", + filter_path: "nodes.*.ingest", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/1ff12523efbd59c213c676937757c460.asciidoc b/docs/doc_examples/1ff12523efbd59c213c676937757c460.asciidoc new file mode 100644 index 000000000..3673a4528 --- /dev/null +++ b/docs/doc_examples/1ff12523efbd59c213c676937757c460.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + ids: ["VuaCfGcBCdbkQm-e5aOx"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ff296e868635fd102239871a331331b.asciidoc b/docs/doc_examples/1ff296e868635fd102239871a331331b.asciidoc new file mode 100644 index 000000000..831f54389 --- /dev/null +++ b/docs/doc_examples/1ff296e868635fd102239871a331331b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + type_count: { + cardinality: { + field: "type", + precision_threshold: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/1ff9b263b7c3e83278bb6a776a51590a.asciidoc b/docs/doc_examples/1ff9b263b7c3e83278bb6a776a51590a.asciidoc new file mode 100644 index 000000000..d795b84eb --- /dev/null +++ b/docs/doc_examples/1ff9b263b7c3e83278bb6a776a51590a.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/20005d8a6555b259b299d862cd218701.asciidoc b/docs/doc_examples/20005d8a6555b259b299d862cd218701.asciidoc new file mode 100644 index 000000000..c6a7579d9 --- /dev/null +++ b/docs/doc_examples/20005d8a6555b259b299d862cd218701.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + query: "this is a test", + operator: "and", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2006f577a113bda40905cf7b405bf1cf.asciidoc b/docs/doc_examples/2006f577a113bda40905cf7b405bf1cf.asciidoc new file mode 100644 index 000000000..cb8115039 --- /dev/null +++ b/docs/doc_examples/2006f577a113bda40905cf7b405bf1cf.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "If 'url.scheme' is 'http', set 'url.insecure' to true", + if: "ctx.url?.scheme =~ /^http[^s]/", + field: "url.insecure", + value: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2009f2d1ba0780a799a0fdce889c9739.asciidoc b/docs/doc_examples/2009f2d1ba0780a799a0fdce889c9739.asciidoc new file mode 100644 index 000000000..2bf348f71 --- /dev/null +++ b/docs/doc_examples/2009f2d1ba0780a799a0fdce889c9739.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response 
= await client.bulk({ + index: "passage_vectors", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + full_text: "first paragraph another paragraph", + creation_time: "2019-05-04", + paragraph: [ + { + vector: [0.45, 45], + text: "first paragraph", + paragraph_id: "1", + }, + { + vector: [0.8, 0.6], + text: "another paragraph", + paragraph_id: "2", + }, + ], + }, + { + index: { + _id: "2", + }, + }, + { + full_text: "number one paragraph number two paragraph", + creation_time: "2020-05-04", + paragraph: [ + { + vector: [1.2, 4.5], + text: "number one paragraph", + paragraph_id: "1", + }, + { + vector: [-1, 42], + text: "number two paragraph", + paragraph_id: "2", + }, + ], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc b/docs/doc_examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc new file mode 100644 index 000000000..536cf6cba --- /dev/null +++ b/docs/doc_examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "my-data-stream*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/20162e1dac807a7604f58dad814d1bc5.asciidoc b/docs/doc_examples/20162e1dac807a7604f58dad814d1bc5.asciidoc new file mode 100644 index 000000000..3b4eb8d29 --- /dev/null +++ b/docs/doc_examples/20162e1dac807a7604f58dad814d1bc5.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + en: { + tokenizer: "standard", + filter: ["my_en_US_dict_stemmer"], + }, + }, + filter: { + my_en_US_dict_stemmer: { + type: "hunspell", + locale: "en_US", + dedup: false, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/20179a8889e949d6a8ee5fbf2ba35c96.asciidoc b/docs/doc_examples/20179a8889e949d6a8ee5fbf2ba35c96.asciidoc new file mode 100644 index 000000000..3a746ae88 --- /dev/null +++ b/docs/doc_examples/20179a8889e949d6a8ee5fbf2ba35c96.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "google-vertex-ai-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "google_vertex_ai_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/203c3bb334384bdfb11ff1101ccfba25.asciidoc b/docs/doc_examples/203c3bb334384bdfb11ff1101ccfba25.asciidoc new file mode 100644 index 000000000..45ad3be18 --- /dev/null +++ b/docs/doc_examples/203c3bb334384bdfb11ff1101ccfba25.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + suggest: { + text: "obel prize", + simple_phrase: { + phrase: { + field: "title.trigram", + size: 1, + smoothing: { + laplace: { + alpha: 0.7, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/20407c847adb8393ce41dc656384afc4.asciidoc b/docs/doc_examples/20407c847adb8393ce41dc656384afc4.asciidoc new file mode 100644 index 000000000..ce827a730 --- /dev/null +++ b/docs/doc_examples/20407c847adb8393ce41dc656384afc4.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "passage_vectors", + fields: ["creation_time", "full_text"], + _source: false, + knn: { + query_vector: [0.45, 45], + field: "paragraph.vector", + k: 2, + num_candidates: 2, + filter: { + bool: { + filter: [ + { + range: { + creation_time: { + gte: "2019-05-01", + lte: "2019-05-05", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2051ffe025550ab6645bfd525eaed3c4.asciidoc b/docs/doc_examples/2051ffe025550ab6645bfd525eaed3c4.asciidoc new file mode 100644 index 000000000..1b42fb168 --- /dev/null +++ b/docs/doc_examples/2051ffe025550ab6645bfd525eaed3c4.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: "POINT (-74.1 40.73)", + bottom_right: "POINT (-71.12 40.01)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2063713516847eef5d1dbf4ca1e877b0.asciidoc b/docs/doc_examples/2063713516847eef5d1dbf4ca1e877b0.asciidoc new file mode 100644 index 000000000..c9405986c --- /dev/null +++ b/docs/doc_examples/2063713516847eef5d1dbf4ca1e877b0.asciidoc @@ -0,0 +1,93 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (2.336389 48.861111)", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggregations: { + "large-grid": { + geohex_grid: { + field: "location", + precision: 4, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/206c723296be8ef8d58aef3ee01f5ba2.asciidoc b/docs/doc_examples/206c723296be8ef8d58aef3ee01f5ba2.asciidoc new file mode 100644 index 000000000..f33acb605 --- /dev/null +++ b/docs/doc_examples/206c723296be8ef8d58aef3ee01f5ba2.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + my_date_histo: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + the_deriv: { + derivative: { + buckets_path: "_count", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/206d57bf0cb022c8229894e7753eca83.asciidoc b/docs/doc_examples/206d57bf0cb022c8229894e7753eca83.asciidoc new file mode 100644 index 000000000..f739baea2 --- /dev/null +++ b/docs/doc_examples/206d57bf0cb022c8229894e7753eca83.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_shape: { + location: { + shape: { + type: "envelope", + coordinates: [ + [13, 53], + [14, 52], + ], + }, + relation: "within", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2081739da0c69de8af6f5bf9e94433e6.asciidoc b/docs/doc_examples/2081739da0c69de8af6f5bf9e94433e6.asciidoc new file mode 100644 index 000000000..2730d46b0 --- /dev/null +++ b/docs/doc_examples/2081739da0c69de8af6f5bf9e94433e6.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "MULTILINESTRING ((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0), (100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/208c2b41bd1659aae8f02fa3e3b7378a.asciidoc b/docs/doc_examples/208c2b41bd1659aae8f02fa3e3b7378a.asciidoc new file mode 100644 index 000000000..e782bce3f --- /dev/null +++ b/docs/doc_examples/208c2b41bd1659aae8f02fa3e3b7378a.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + first_name: { + type: "text", + copy_to: "full_name", + }, + last_name: { + type: "text", + copy_to: "full_name", + }, + full_name: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + first_name: "John", + last_name: "Smith", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match: { + full_name: { + query: "John Smith", + operator: "and", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/209a9190082498f0b7daa26f8834846b.asciidoc b/docs/doc_examples/209a9190082498f0b7daa26f8834846b.asciidoc new file mode 100644 index 000000000..4b01230ea --- /dev/null +++ b/docs/doc_examples/209a9190082498f0b7daa26f8834846b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + title: { + type: "text", + norms: false, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/20bc71cc5bbe04184e27827f3777a406.asciidoc b/docs/doc_examples/20bc71cc5bbe04184e27827f3777a406.asciidoc new file mode 100644 index 000000000..d94a2c5aa --- /dev/null +++ b/docs/doc_examples/20bc71cc5bbe04184e27827f3777a406.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + fields: ["@timestamp", "day_of_week"], + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/20c595907b4afbf26bd60e816a6ddf6a.asciidoc b/docs/doc_examples/20c595907b4afbf26bd60e816a6ddf6a.asciidoc new file mode 100644 index 000000000..0ffd26450 --- /dev/null +++ b/docs/doc_examples/20c595907b4afbf26bd60e816a6ddf6a.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + query_string: "kayaking", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc b/docs/doc_examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc new file mode 100644 index 000000000..970d578eb --- /dev/null +++ b/docs/doc_examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getRecords({ + job_id: "low_request_rate", + sort: "record_score", + desc: true, + start: 1454944100000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/20f62d0540bf6261549bd286416eae28.asciidoc b/docs/doc_examples/20f62d0540bf6261549bd286416eae28.asciidoc new file mode 100644 index 000000000..2f0f9c3a6 --- /dev/null +++ b/docs/doc_examples/20f62d0540bf6261549bd286416eae28.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.putPolicy({ + name: "my-policy", + match: { + indices: "users", + match_field: "email", + enrich_fields: ["first_name", "last_name", "city", "zip", "state"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2105f2d1d81977054a93163a175793ce.asciidoc b/docs/doc_examples/2105f2d1d81977054a93163a175793ce.asciidoc new file mode 100644 index 000000000..8f1c83690 --- /dev/null +++ b/docs/doc_examples/2105f2d1d81977054a93163a175793ce.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.status(); +console.log(response); +---- diff --git a/docs/doc_examples/210cf5c76bff517f48e80fa1c2d63907.asciidoc b/docs/doc_examples/210cf5c76bff517f48e80fa1c2d63907.asciidoc deleted file mode 100644 index 54576e75d..000000000 --- a/docs/doc_examples/210cf5c76bff517f48e80fa1c2d63907.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.getMapping({ - index: 'my_index' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc 
b/docs/doc_examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc new file mode 100644 index 000000000..516c7c90c --- /dev/null +++ b/docs/doc_examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + text_expansion: { + "<sparse_vector_field>": { + model_id: "the model to produce the token weights", + model_text: "the query string", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/21565b72da426776e445b1a166f6e104.asciidoc b/docs/doc_examples/21565b72da426776e445b1a166f6e104.asciidoc new file mode 100644 index 000000000..04fba14ee --- /dev/null +++ b/docs/doc_examples/21565b72da426776e445b1a166f6e104.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + "my-join-field": { + type: "join", + relations: { + parent: "child", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/216848930c2d344fe0bed0daa70c35b9.asciidoc b/docs/doc_examples/216848930c2d344fe0bed0daa70c35b9.asciidoc index 841913ded..3e6ff2d49 100644 --- a/docs/doc_examples/216848930c2d344fe0bed0daa70c35b9.asciidoc +++ b/docs/doc_examples/216848930c2d344fe0bed0daa70c35b9.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.tasks.list({ - detailed: 'true', - actions: '*/delete/byquery' -}) -console.log(response) + detailed: "true", + actions: "*/delete/byquery", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/216a6573ab4ab023e5dcac4eaa08c3c8.asciidoc b/docs/doc_examples/216a6573ab4ab023e5dcac4eaa08c3c8.asciidoc new file mode 100644 index 000000000..2b66cb86d --- /dev/null +++ b/docs/doc_examples/216a6573ab4ab023e5dcac4eaa08c3c8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.verifyRepository({ + name: "my_unverified_backup", +}); +console.log(response); +---- diff --git a/docs/doc_examples/216e24f05cbb82c1718713fbab8623d2.asciidoc b/docs/doc_examples/216e24f05cbb82c1718713fbab8623d2.asciidoc new file mode 100644 index 000000000..dbaac1815 --- /dev/null +++ b/docs/doc_examples/216e24f05cbb82c1718713fbab8623d2.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "geoip", + description: "Add ip geolocation info", + processors: [ + { + geoip: { + field: "ip", + target_field: "geo", + database_file: "GeoLite2-Country.mmdb", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "geoip", + document: { + ip: "89.160.20.128", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/21715c32c140feeab04b38ff6d6de111.asciidoc b/docs/doc_examples/21715c32c140feeab04b38ff6d6de111.asciidoc new file mode 100644 index 000000000..c37e742e2 --- /dev/null +++
b/docs/doc_examples/21715c32c140feeab04b38ff6d6de111.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2185c9dfc62a59313df1702ec1c3513e.asciidoc b/docs/doc_examples/2185c9dfc62a59313df1702ec1c3513e.asciidoc new file mode 100644 index 000000000..c268c702d --- /dev/null +++ b/docs/doc_examples/2185c9dfc62a59313df1702ec1c3513e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + percents: [95, 99, 99.9], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/218b9009f120e8ad33f710e019179562.asciidoc b/docs/doc_examples/218b9009f120e8ad33f710e019179562.asciidoc new file mode 100644 index 000000000..0ac6636b6 --- /dev/null +++ b/docs/doc_examples/218b9009f120e8ad33f710e019179562.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.getRepository({ + name: "my_repository", +}); +console.log(response); +---- diff --git a/docs/doc_examples/21a226d91d8edd209f6a821064e83918.asciidoc b/docs/doc_examples/21a226d91d8edd209f6a821064e83918.asciidoc new file mode 100644 index 000000000..44d76f5ca --- /dev/null +++ b/docs/doc_examples/21a226d91d8edd209f6a821064e83918.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + query: { + match: { + type: "t-shirt", + }, + }, + aggs: { + all_products: { + global: {}, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, + }, + t_shirts: { + avg: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/21bb03ca9123de3237c1c76934f9f172.asciidoc b/docs/doc_examples/21bb03ca9123de3237c1c76934f9f172.asciidoc new file mode 100644 index 000000000..b791b1658 --- /dev/null +++ b/docs/doc_examples/21bb03ca9123de3237c1c76934f9f172.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "logs", + id: 4, + refresh: "true", + document: { + body: "info: user Bob logged out", + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "logs", + size: 0, + aggs: { + messages: { + filters: { + other_bucket_key: "other_messages", + filters: { + errors: { + match: { + body: "error", + }, + }, + warnings: { + match: { + body: "warning", + }, + }, + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc b/docs/doc_examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc new file mode 100644 index 000000000..eb6345b43 --- /dev/null +++ b/docs/doc_examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.danglingIndices.listDanglingIndices(); +console.log(response); +---- diff --git a/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc b/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc new file mode 100644 index 000000000..295b4ed6c --- /dev/null +++ b/docs/doc_examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "azure_ai_studio_embeddings", + inference_config: { + service: "azureaistudio", + service_settings: { + api_key: "<api_key>", + target: "<target_uri>", + provider: "<model_provider>", + endpoint_type: "<endpoint_type>", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/21d0ab6e420bfe7a1639db6af5b2e9c0.asciidoc b/docs/doc_examples/21d0ab6e420bfe7a1639db6af5b2e9c0.asciidoc new file mode 100644 index 000000000..1e800bb8e --- /dev/null +++ b/docs/doc_examples/21d0ab6e420bfe7a1639db6af5b2e9c0.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "reviews", + filter_path: "aggregations", + size: 0, + runtime_mappings: { + "rating.out_of_ten": { + type: "long", + script: { + source: "emit(doc['rating'].value * params.scaleFactor)", + params: { + scaleFactor: 2, + }, + }, + }, + }, + aggs: { + review_average: { + avg: { + field: "rating.out_of_ten", + }, + }, + review_variability: { + median_absolute_deviation: { + field: "rating.out_of_ten", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc b/docs/doc_examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc new file mode 100644 index 000000000..01d52961a --- /dev/null +++ b/docs/doc_examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + card: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + card: ["king", "ace", "ace", "jack"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/21d5fe55ca32b10b118224ea1a8a2e04.asciidoc b/docs/doc_examples/21d5fe55ca32b10b118224ea1a8a2e04.asciidoc new file mode 100644 index 000000000..2540558a2 --- /dev/null +++ b/docs/doc_examples/21d5fe55ca32b10b118224ea1a8a2e04.asciidoc @@ -0,0 +1,77 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "correlate_latency", + size: 0, + filter_path: "aggregations", + aggs: { + buckets: { + terms: { + field: "version", + size: 2, + }, + aggs: { + latency_ranges: { + range: { + field: "latency", + ranges: [ + { + to: 0, + }, + { + from: 0, + to: 105, + }, + { + from: 105, + to: 225, + }, + { + from: 225, + to: 445, + }, + { + from: 445, + to: 665, + }, + { + from: 665, + to: 885, + }, + 
{ + from: 885, + to: 1115, + }, + { + from: 1115, + to: 1335, + }, + { + from: 1335, + to: 1555, + }, + { + from: 1555, + to: 1775, + }, + { + from: 1775, + }, + ], + }, + }, + ks_test: { + bucket_count_ks_test: { + buckets_path: "latency_ranges>_count", + alternative: ["less", "greater", "two_sided"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/21e95d29bc37deb5689a654aa323b4ba.asciidoc b/docs/doc_examples/21e95d29bc37deb5689a654aa323b4ba.asciidoc new file mode 100644 index 000000000..48dedaf2a --- /dev/null +++ b/docs/doc_examples/21e95d29bc37deb5689a654aa323b4ba.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "admins", + roles: ["monitoring", "user"], + rules: { + field: { + groups: "cn=admins,dc=example,dc=com", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/221e9b14567f950008459af77757750e.asciidoc b/docs/doc_examples/221e9b14567f950008459af77757750e.asciidoc new file mode 100644 index 000000000..9fe26fa18 --- /dev/null +++ b/docs/doc_examples/221e9b14567f950008459af77757750e.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "cluster_health_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + http: { + request: { + host: "localhost", + port: 9200, + path: "/_cluster/health", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2224143c45dfc83a2d10b98cd4f94bb5.asciidoc b/docs/doc_examples/2224143c45dfc83a2d10b98cd4f94bb5.asciidoc new file mode 100644 index 000000000..7fdf65e5f --- /dev/null +++ b/docs/doc_examples/2224143c45dfc83a2d10b98cd4f94bb5.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + bool: { + must_not: [ + { + nested: { + path: "comments", + query: { + term: { + "comments.author": "nik9000", + }, + }, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/222e49c924ca8bac7b41bc952a39261c.asciidoc b/docs/doc_examples/222e49c924ca8bac7b41bc952a39261c.asciidoc new file mode 100644 index 000000000..b4bcd4bbb --- /dev/null +++ b/docs/doc_examples/222e49c924ca8bac7b41bc952a39261c.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + size: 3, + query: { + bool: { + should: [ + { + match: { + title: { + query: "mountain lake", + boost: 1, + }, + }, + }, + { + semantic: { + field: "title_semantic", + query: "mountain lake", + boost: 2, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc b/docs/doc_examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc index 246f3de57..36c63a01a 100644 --- a/docs/doc_examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc +++ b/docs/doc_examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc @@ -4,61 +4,58 @@ [source, js] ---- const response = await client.search({ - body: { - 
query: { - nested: { - path: 'parent', - query: { - bool: { - must: { - range: { - 'parent.age': { - gte: 21 - } - } + query: { + nested: { + path: "parent", + query: { + bool: { + must: { + range: { + "parent.age": { + gte: 21, + }, }, - filter: { - nested: { - path: 'parent.child', - query: { - match: { - 'parent.child.name': 'matt' - } - } - } - } - } - } - } + }, + filter: { + nested: { + path: "parent.child", + query: { + match: { + "parent.child.name": "matt", + }, + }, + }, + }, + }, + }, }, - sort: [ - { - 'parent.child.age': { - mode: 'min', - order: 'asc', + }, + sort: [ + { + "parent.child.age": { + mode: "min", + order: "asc", + nested: { + path: "parent", + filter: { + range: { + "parent.age": { + gte: 21, + }, + }, + }, nested: { - path: 'parent', + path: "parent.child", filter: { - range: { - 'parent.age': { - gte: 21 - } - } + match: { + "parent.child.name": "matt", + }, }, - nested: { - path: 'parent.child', - filter: { - match: { - 'parent.child.name': 'matt' - } - } - } - } - } - } - ] - } -}) -console.log(response) + }, + }, + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2238ac4170275f6cfc2af49c3f014cbc.asciidoc b/docs/doc_examples/2238ac4170275f6cfc2af49c3f014cbc.asciidoc new file mode 100644 index 000000000..4955d26be --- /dev/null +++ b/docs/doc_examples/2238ac4170275f6cfc2af49c3f014cbc.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + runtime_mappings: { + "grade.corrected": { + type: "double", + script: { + source: "emit(Math.min(100, doc['grade'].value * params.correction))", + params: { + correction: 1.2, + }, + }, + }, + }, + aggs: { + grades_stats: { + extended_stats: { + field: "grade.corrected", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/22619a4111f66e1b7231693b8f8d069a.asciidoc b/docs/doc_examples/22619a4111f66e1b7231693b8f8d069a.asciidoc new file mode 100644 index 000000000..99cef9cd2 --- /dev/null +++ b/docs/doc_examples/22619a4111f66e1b7231693b8f8d069a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.queryWatches({ + size: 100, +}); +console.log(response); +---- diff --git a/docs/doc_examples/22882d4eb8b99f44c8e0d3a2c893fc4b.asciidoc b/docs/doc_examples/22882d4eb8b99f44c8e0d3a2c893fc4b.asciidoc new file mode 100644 index 000000000..9ec51573d --- /dev/null +++ b/docs/doc_examples/22882d4eb8b99f44c8e0d3a2c893fc4b.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + "my-small": { + type: "keyword", + ignore_above: 2, + }, + "my-large": { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + "my-small": ["ok", "bad"], + "my-large": "ok content", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + fields: ["my-*"], + _source: false, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/229b83cbcd8efa1b0288a728a2abacb4.asciidoc 
b/docs/doc_examples/229b83cbcd8efa1b0288a728a2abacb4.asciidoc new file mode 100644 index 000000000..43d8b3d9a --- /dev/null +++ b/docs/doc_examples/229b83cbcd8efa1b0288a728a2abacb4.asciidoc @@ -0,0 +1,73 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + location: { + type: "point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Point as an object using GeoJSON format", + location: { + type: "Point", + coordinates: [-71.34, 41.12], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + text: "Point as a WKT POINT primitive", + location: "POINT (-71.34 41.12)", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index-000001", + id: 3, + document: { + text: "Point as an object with 'x' and 'y' keys", + location: { + x: -71.34, + y: 41.12, + }, + }, +}); +console.log(response3); + +const response4 = await client.index({ + index: "my-index-000001", + id: 4, + document: { + text: "Point as an array", + location: [-71.34, 41.12], + }, +}); +console.log(response4); + +const response5 = await client.index({ + index: "my-index-000001", + id: 5, + document: { + text: "Point as a string", + location: "-71.34,41.12", + }, +}); +console.log(response5); +---- diff --git a/docs/doc_examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc b/docs/doc_examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc new file mode 100644 index 000000000..a0c450d19 --- /dev/null +++ b/docs/doc_examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "datastream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/22cb99d4e6ba3101a2d9f59764a90877.asciidoc b/docs/doc_examples/22cb99d4e6ba3101a2d9f59764a90877.asciidoc new file mode 100644 index 000000000..27b5e33c7 --- /dev/null +++ b/docs/doc_examples/22cb99d4e6ba3101a2d9f59764a90877.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "POINT (-77.03653 38.897676)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/22d8e92b4100f8e4f52260ef8d3aa2b2.asciidoc b/docs/doc_examples/22d8e92b4100f8e4f52260ef8d3aa2b2.asciidoc new file mode 100644 index 000000000..064052311 --- /dev/null +++ b/docs/doc_examples/22d8e92b4100f8e4f52260ef8d3aa2b2.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + name: { + type: "text", + }, + blob: { + type: "binary", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + name: "Some binary blob", + blob: "U29tZSBiaW5hcnkgYmxvYg==", + }, +}); +console.log(response1); +---- diff --git 
a/docs/doc_examples/22dd833336fa22c8a8f67bb754ffba9a.asciidoc b/docs/doc_examples/22dd833336fa22c8a8f67bb754ffba9a.asciidoc new file mode 100644 index 000000000..852e26c4a --- /dev/null +++ b/docs/doc_examples/22dd833336fa22c8a8f67bb754ffba9a.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "azure-openai-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "azure_openai_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc b/docs/doc_examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc new file mode 100644 index 000000000..64ba4366e --- /dev/null +++ b/docs/doc_examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + "lowercase", + { + type: "stop", + stopwords: ["a", "is", "this"], + }, + ], + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/22ef90a7fb057728d2115f0c6f551819.asciidoc b/docs/doc_examples/22ef90a7fb057728d2115f0c6f551819.asciidoc new file mode 100644 index 000000000..bb51df733 --- /dev/null +++ b/docs/doc_examples/22ef90a7fb057728d2115f0c6f551819.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + price_ranges: { + range: { + field: "price", + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 200, + }, + { + from: 200, + }, + ], + }, + aggs: { + price_stats: { + stats: { + field: "price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/23074748d6c978176df5b04265e88938.asciidoc b/docs/doc_examples/23074748d6c978176df5b04265e88938.asciidoc new file mode 100644 index 000000000..a7648d1a2 --- /dev/null +++ b/docs/doc_examples/23074748d6c978176df5b04265e88938.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + name: "index.routing.allocation.include._tier_preference", + flat_settings: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2308c9948cbebd2092eec03b11281005.asciidoc b/docs/doc_examples/2308c9948cbebd2092eec03b11281005.asciidoc new file mode 100644 index 000000000..84f81083d --- /dev/null +++ b/docs/doc_examples/2308c9948cbebd2092eec03b11281005.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_fs_backup", + repository: { + type: "fs", + settings: { + location: "E:\\Mount\\Backups\\My_fs_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc 
b/docs/doc_examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc new file mode 100644 index 000000000..60e82f591 --- /dev/null +++ b/docs/doc_examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + text_expansion: { + "ml.tokens": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/231aa0bb39c35fe199d28fe0e4a62b2e.asciidoc b/docs/doc_examples/231aa0bb39c35fe199d28fe0e4a62b2e.asciidoc deleted file mode 100644 index ab3c92126..000000000 --- a/docs/doc_examples/231aa0bb39c35fe199d28fe0e4a62b2e.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - query: { - match_phrase: { - address: 'mill lane' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/2342a56279106ea643026df657bf7f88.asciidoc b/docs/doc_examples/2342a56279106ea643026df657bf7f88.asciidoc new file mode 100644 index 000000000..2369e4df7 --- /dev/null +++ b/docs/doc_examples/2342a56279106ea643026df657bf7f88.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + index: { + similarity: { + my_similarity: { + type: "DFR", + basic_model: "g", + after_effect: "l", + normalization: "h2", + "normalization.h2.c": "3.0", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/234cec3ead32d7ed71afbe1edfea23df.asciidoc b/docs/doc_examples/234cec3ead32d7ed71afbe1edfea23df.asciidoc new file mode 100644 index 000000000..8997d01a6 --- /dev/null +++ b/docs/doc_examples/234cec3ead32d7ed71afbe1edfea23df.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + has_parent: { + parent_type: "parent", + score: true, + query: { + function_score: { + script_score: { + script: "_score * doc['view_count'].value", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/236f50d89a07b83119af72e367e685da.asciidoc b/docs/doc_examples/236f50d89a07b83119af72e367e685da.asciidoc new file mode 100644 index 000000000..b79fd83ff --- /dev/null +++ b/docs/doc_examples/236f50d89a07b83119af72e367e685da.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + max_age: "30d", + min_primary_shard_size: "1gb", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/239f615e0009c5cb1dc4e82ec4c0dab5.asciidoc b/docs/doc_examples/239f615e0009c5cb1dc4e82ec4c0dab5.asciidoc new file mode 100644 index 000000000..abeca2dfd --- /dev/null +++ b/docs/doc_examples/239f615e0009c5cb1dc4e82ec4c0dab5.asciidoc @@ 
-0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "cluster_health_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + http: { + request: { + host: "localhost", + port: 9200, + path: "/_cluster/health", + auth: { + basic: { + username: "elastic", + password: "x-pack-test-password", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/23ab0f1023b1b2cd5cdf2a8f9ccfd57b.asciidoc b/docs/doc_examples/23ab0f1023b1b2cd5cdf2a8f9ccfd57b.asciidoc deleted file mode 100644 index 439bf360b..000000000 --- a/docs/doc_examples/23ab0f1023b1b2cd5cdf2a8f9ccfd57b.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'test1', - body: { - mappings: { - properties: { - user: { - type: 'keyword' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/23b062c157235246d7c347b9047b2435.asciidoc b/docs/doc_examples/23b062c157235246d7c347b9047b2435.asciidoc new file mode 100644 index 000000000..93cb410f7 --- /dev/null +++ b/docs/doc_examples/23b062c157235246d7c347b9047b2435.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping1", + roles: ["user"], + enabled: true, + rules: { + field: { + username: "*", + }, + }, + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/23c4ae62f7035f2796e0ac3c7c4c20a9.asciidoc b/docs/doc_examples/23c4ae62f7035f2796e0ac3c7c4c20a9.asciidoc new file mode 100644 index 000000000..1709625f7 --- /dev/null +++ b/docs/doc_examples/23c4ae62f7035f2796e0ac3c7c4c20a9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + migrate: {}, + allocate: { + number_of_replicas: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2408020186af569a76a30eccadaed0d5.asciidoc b/docs/doc_examples/2408020186af569a76a30eccadaed0d5.asciidoc new file mode 100644 index 000000000..3b2bfd706 --- /dev/null +++ b/docs/doc_examples/2408020186af569a76a30eccadaed0d5.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + script: { + description: "Extract 'tags' from 'env' field", + lang: "painless", + source: + "\n String[] envSplit = ctx['env'].splitOnToken(params['delimiter']);\n ArrayList tags = new ArrayList();\n tags.add(envSplit[params['position']].trim());\n ctx['tags'] = tags;\n ", + params: { + delimiter: "-", + position: 1, + }, + }, + }, + ], + }, + docs: [ + { + _source: { + env: "es01-prod", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/24275847128b68da6e14233aa1259fb9.asciidoc b/docs/doc_examples/24275847128b68da6e14233aa1259fb9.asciidoc new file 
mode 100644 index 000000000..b556c6936 --- /dev/null +++ b/docs/doc_examples/24275847128b68da6e14233aa1259fb9.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "GET /search", + }, + }, + collapse: { + field: "user.id", + inner_hits: [ + { + name: "largest_responses", + size: 3, + sort: [ + { + "http.response.bytes": { + order: "desc", + }, + }, + ], + }, + { + name: "most_recent", + size: 3, + sort: [ + { + "@timestamp": { + order: "desc", + }, + }, + ], + }, + ], + }, + sort: ["http.response.bytes"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/242a26ced0e5706e48dcda19a4003094.asciidoc b/docs/doc_examples/242a26ced0e5706e48dcda19a4003094.asciidoc new file mode 100644 index 000000000..4542b8481 --- /dev/null +++ b/docs/doc_examples/242a26ced0e5706e48dcda19a4003094.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + remote: { + host: "http://otherhost:9200/", + username: "user", + password: "pass", + }, + index: "my-index-000001", + query: { + match: { + test: "data", + }, + }, + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc b/docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc new file mode 100644 index 000000000..deabe9511 --- /dev/null +++ b/docs/doc_examples/246763219ec06172f7aa57bba28d344a.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-bit", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + element_type: "bit", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-rank-vectors-bit", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + my_vector: [127, -127, 0, 1, 42], + }, + { + index: { + _id: "2", + }, + }, + { + my_vector: "8100012a7f", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-rank-vectors-bit", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "maxSimDotProduct(params.query_vector, 'my_vector')", + params: { + query_vector: [ + [ + 0.35, 0.77, 0.95, 0.15, 0.11, 0.08, 0.58, 0.06, 0.44, 0.52, 0.21, + 0.62, 0.65, 0.16, 0.64, 0.39, 0.93, 0.06, 0.93, 0.31, 0.92, 0, + 0.66, 0.86, 0.92, 0.03, 0.81, 0.31, 0.2, 0.92, 0.95, 0.64, 0.19, + 0.26, 0.77, 0.64, 0.78, 0.32, 0.97, 0.84, + ], + ], + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2468ab381257d759d8a88af1141f6f9c.asciidoc b/docs/doc_examples/2468ab381257d759d8a88af1141f6f9c.asciidoc deleted file mode 100644 index 61aef030b..000000000 --- a/docs/doc_examples/2468ab381257d759d8a88af1141f6f9c.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.existsSource({ - index: 'twitter', - id: '1' -}) -console.log(response) ----- - diff --git 
a/docs/doc_examples/2493c25e1ef944bc4de0f726470bcdec.asciidoc b/docs/doc_examples/2493c25e1ef944bc4de0f726470bcdec.asciidoc new file mode 100644 index 000000000..3496eecab --- /dev/null +++ b/docs/doc_examples/2493c25e1ef944bc4de0f726470bcdec.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + my_agg: { + frequent_item_sets: { + minimum_set_size: 3, + fields: [ + { + field: "category.keyword", + }, + { + field: "geoip.city_name", + exclude: "other", + }, + ], + size: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/249bf48252c8cea47ef872541c8a884c.asciidoc b/docs/doc_examples/249bf48252c8cea47ef872541c8a884c.asciidoc new file mode 100644 index 000000000..53430547f --- /dev/null +++ b/docs/doc_examples/249bf48252c8cea47ef872541c8a884c.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.grantApiKey({ + grant_type: "password", + username: "test_admin", + password: "x-pack-test-password", + api_key: { + name: "my-api-key", + expiration: "1d", + role_descriptors: { + "role-a": { + cluster: ["all"], + indices: [ + { + names: ["index-a*"], + privileges: ["read"], + }, + ], + }, + "role-b": { + cluster: ["all"], + indices: [ + { + names: ["index-b*"], + privileges: ["all"], + }, + ], + }, + }, + metadata: { + application: "my-application", + environment: { + level: 1, + trusted: true, + tags: ["dev", "staging"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/24a037008e0fc2550ecb6a5d36c04a93.asciidoc b/docs/doc_examples/24a037008e0fc2550ecb6a5d36c04a93.asciidoc new file mode 100644 index 000000000..6972b07c9 --- /dev/null +++ b/docs/doc_examples/24a037008e0fc2550ecb6a5d36c04a93.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "date.day_of_week": { + type: "keyword", + script: + "emit(doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))", + }, + }, + aggs: { + day_of_week: { + terms: { + field: "date.day_of_week", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc b/docs/doc_examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc new file mode 100644 index 000000000..a18387f52 --- /dev/null +++ b/docs/doc_examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "student_performance_mathematics_reg", + query: { + term: { + "ml.is_training": { + value: true, + }, + }, + }, + evaluation: { + regression: { + actual_field: "G3", + predicted_field: "ml.G3_prediction", + metrics: { + r_squared: {}, + mse: {}, + msle: {}, + huber: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc b/docs/doc_examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc new file mode 
100644 index 000000000..d00bf24fb --- /dev/null +++ b/docs/doc_examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getTemplate({ + name: "template_1,template_2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/24bdccb07bba7e7e6ff45d3d4cd83064.asciidoc b/docs/doc_examples/24bdccb07bba7e7e6ff45d3d4cd83064.asciidoc new file mode 100644 index 000000000..2dfb3e83e --- /dev/null +++ b/docs/doc_examples/24bdccb07bba7e7e6ff45d3d4cd83064.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-data-stream", + pipeline: "my-pipeline", +}); +console.log(response); + +const response1 = await client.reindex({ + source: { + index: "my-data-stream", + }, + dest: { + index: "my-new-data-stream", + op_type: "create", + pipeline: "my-pipeline", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/24d66b2ebdf662d8b03e17214e65c825.asciidoc b/docs/doc_examples/24d66b2ebdf662d8b03e17214e65c825.asciidoc new file mode 100644 index 000000000..a20bfd230 --- /dev/null +++ b/docs/doc_examples/24d66b2ebdf662d8b03e17214e65c825.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.profiling.templates.enabled": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/24d806d1803158dacd4dda73c4204d3e.asciidoc b/docs/doc_examples/24d806d1803158dacd4dda73c4204d3e.asciidoc new file mode 100644 index 000000000..741ebc94c --- /dev/null +++ b/docs/doc_examples/24d806d1803158dacd4dda73c4204d3e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_query_rules/my-ruleset/_rule/my-rule1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/24f4dfdf9922d5aa79151675b7767742.asciidoc b/docs/doc_examples/24f4dfdf9922d5aa79151675b7767742.asciidoc new file mode 100644 index 000000000..a340d8afe --- /dev/null +++ b/docs/doc_examples/24f4dfdf9922d5aa79151675b7767742.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + scroll: "1m", + size: 100, + query: { + match: { + message: "foo", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/251ea12c1248385ab409906ac64d9ee9.asciidoc b/docs/doc_examples/251ea12c1248385ab409906ac64d9ee9.asciidoc deleted file mode 100644 index 839a8e655..000000000 --- a/docs/doc_examples/251ea12c1248385ab409906ac64d9ee9.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - query: { - bool: { - must: { - match_all: {} - }, - filter: { - range: { - balance: { - gte: 20000, - lte: 30000 - } - } 
- } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/253140cb1e270e5ee23e15dbaeaaa0ea.asciidoc b/docs/doc_examples/253140cb1e270e5ee23e15dbaeaaa0ea.asciidoc new file mode 100644 index 000000000..651e42711 --- /dev/null +++ b/docs/doc_examples/253140cb1e270e5ee23e15dbaeaaa0ea.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.dataStreamsStats({ + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/25576b6773322f0929d4c635a940dba0.asciidoc b/docs/doc_examples/25576b6773322f0929d4c635a940dba0.asciidoc new file mode 100644 index 000000000..5752db18f --- /dev/null +++ b/docs/doc_examples/25576b6773322f0929d4c635a940dba0.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["title", "content"], + query: "this OR that OR thus", + type: "cross_fields", + minimum_should_match: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/256eba7a77c8890a43afeda8ce8a3225.asciidoc b/docs/doc_examples/256eba7a77c8890a43afeda8ce8a3225.asciidoc new file mode 100644 index 000000000..4a308455d --- /dev/null +++ b/docs/doc_examples/256eba7a77c8890a43afeda8ce8a3225.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-text-embeddings-pipeline", + description: "Text embedding pipeline", + processors: [ + { + inference: { + model_id: "sentence-transformers__msmarco-minilm-l-12-v3", + target_field: "my_embeddings", + field_map: { + my_text_field: "text_field", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/25737fd456fd317cc4cc2db76b6cf28e.asciidoc b/docs/doc_examples/25737fd456fd317cc4cc2db76b6cf28e.asciidoc new file mode 100644 index 000000000..f621c76df --- /dev/null +++ b/docs/doc_examples/25737fd456fd317cc4cc2db76b6cf28e.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test-000001", + aliases: { + "test-alias": { + is_write_index: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2592e5361f7ea3b3dd1840f63d760dae.asciidoc b/docs/doc_examples/2592e5361f7ea3b3dd1840f63d760dae.asciidoc new file mode 100644 index 000000000..2803a9d85 --- /dev/null +++ b/docs/doc_examples/2592e5361f7ea3b3dd1840f63d760dae.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + more_like_this: { + fields: ["name.first", "name.last"], + like: [ + { + _index: "marvel", + doc: { + name: { + first: "Ben", + last: "Grimm", + }, + _doc: "You got no idea what I'd... 
what I'd give to be invisible.", + }, + }, + { + _index: "marvel", + _id: "2", + }, + ], + min_term_freq: 1, + max_query_terms: 12, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/25981b7b3d55b87e1484586d57b695b1.asciidoc b/docs/doc_examples/25981b7b3d55b87e1484586d57b695b1.asciidoc new file mode 100644 index 000000000..53895bd8a --- /dev/null +++ b/docs/doc_examples/25981b7b3d55b87e1484586d57b695b1.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "products", + id: 1567, + document: { + product: "r2d2", + details: "A resourceful astromech droid", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/25a0dad6547d432f5a3d394528f1c138.asciidoc b/docs/doc_examples/25a0dad6547d432f5a3d394528f1c138.asciidoc new file mode 100644 index 000000000..2c1e36514 --- /dev/null +++ b/docs/doc_examples/25a0dad6547d432f5a3d394528f1c138.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 2, + routing: "user1", + stored_fields: "tags,counter", +}); +console.log(response); +---- diff --git a/docs/doc_examples/25ae1a698f867ba5139605cc952436c0.asciidoc b/docs/doc_examples/25ae1a698f867ba5139605cc952436c0.asciidoc new file mode 100644 index 000000000..69d44a527 --- /dev/null +++ b/docs/doc_examples/25ae1a698f867ba5139605cc952436c0.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "place", + pretty: "true", + suggest: { + place_suggestion: { + prefix: "tim", + completion: { + field: "suggest", + size: 10, + contexts: { + place_type: [ + { + context: "cafe", + }, + { + context: "restaurants", + boost: 2, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc b/docs/doc_examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc new file mode 100644 index 000000000..455f478b2 --- /dev/null +++ b/docs/doc_examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + h: "index,shard,prirep,state,unassigned.reason", +}); +console.log(response); +---- diff --git a/docs/doc_examples/25cb9e1da00dfd971065ce182467434d.asciidoc b/docs/doc_examples/25cb9e1da00dfd971065ce182467434d.asciidoc new file mode 100644 index 000000000..d3c98e64c --- /dev/null +++ b/docs/doc_examples/25cb9e1da00dfd971065ce182467434d.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.deleteVotingConfigExclusions(); +console.log(response); +---- diff --git a/docs/doc_examples/25d40d3049e57e2bb70c2c5b88bd7b87.asciidoc b/docs/doc_examples/25d40d3049e57e2bb70c2c5b88bd7b87.asciidoc new file mode 100644 index 000000000..03cd13b25 --- /dev/null +++ b/docs/doc_examples/25d40d3049e57e2bb70c2c5b88bd7b87.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// 
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "_all", + settings: { + "index.unassigned.node_left.delayed_timeout": "0", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/25ecfe423548ac1d7cc86de4a18c48c6.asciidoc b/docs/doc_examples/25ecfe423548ac1d7cc86de4a18c48c6.asciidoc new file mode 100644 index 000000000..ed4157ca2 --- /dev/null +++ b/docs/doc_examples/25ecfe423548ac1d7cc86de4a18c48c6.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + char_filter: ["my_char_filter"], + }, + }, + char_filter: { + my_char_filter: { + type: "pattern_replace", + pattern: "(\\d+)-(?=\\d)", + replacement: "$1_", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "My credit card is 123-456-789", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/25ed47fcb890fcf8d8518ae067362d18.asciidoc b/docs/doc_examples/25ed47fcb890fcf8d8518ae067362d18.asciidoc new file mode 100644 index 000000000..cb23e8e5b --- /dev/null +++ b/docs/doc_examples/25ed47fcb890fcf8d8518ae067362d18.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "reviews", + size: 0, + aggs: { + review_average: { + avg: { + field: "rating", + }, + }, + review_variability: { + median_absolute_deviation: { + field: "rating", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/261480571394632db40e88fbb6c59c2f.asciidoc b/docs/doc_examples/261480571394632db40e88fbb6c59c2f.asciidoc new file mode 100644 index 000000000..003b37404 --- /dev/null +++ b/docs/doc_examples/261480571394632db40e88fbb6c59c2f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.deleteRoleMapping({ + name: "mapping1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/26168987f799cdc4ee4151c85ba7afc5.asciidoc b/docs/doc_examples/26168987f799cdc4ee4151c85ba7afc5.asciidoc new file mode 100644 index 000000000..c6e55f91d --- /dev/null +++ b/docs/doc_examples/26168987f799cdc4ee4151c85ba7afc5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + filter_path: "aggregations", + aggs: { + "my-num-field-stats": { + stats: { + field: "my-num-field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/262196e4323dfc1f8e6daf77d7ba3b6a.asciidoc b/docs/doc_examples/262196e4323dfc1f8e6daf77d7ba3b6a.asciidoc new file mode 100644 index 000000000..2dab9854b --- /dev/null +++ b/docs/doc_examples/262196e4323dfc1f8e6daf77d7ba3b6a.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
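+// Registers a Google Cloud Storage snapshot repository; "my_other_bucket" and the "dev" base_path below are the example's placeholder values.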
+[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_gcs_repository", + repository: { + type: "gcs", + settings: { + bucket: "my_other_bucket", + base_path: "dev", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc b/docs/doc_examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc new file mode 100644 index 000000000..240f9faa0 --- /dev/null +++ b/docs/doc_examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getBuiltinPrivileges(); +console.log(response); +---- diff --git a/docs/doc_examples/262a778d754add491fbc9c721ac25bf0.asciidoc b/docs/doc_examples/262a778d754add491fbc9c721ac25bf0.asciidoc new file mode 100644 index 000000000..225854c1a --- /dev/null +++ b/docs/doc_examples/262a778d754add491fbc9c721ac25bf0.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "whitespace", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/26419320085434680142567d5fda9c35.asciidoc b/docs/doc_examples/26419320085434680142567d5fda9c35.asciidoc new file mode 100644 index 000000000..533c239b2 --- /dev/null +++ b/docs/doc_examples/26419320085434680142567d5fda9c35.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "network-traffic", + size: 0, + aggs: { + "ipv4-subnets": { + ip_prefix: { + field: "ipv4", + prefix_length: 24, + min_doc_count: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2643b8c512cb3f3449259cdf498c6ab5.asciidoc b/docs/doc_examples/2643b8c512cb3f3449259cdf498c6ab5.asciidoc new file mode 100644 index 000000000..b01e6d6b0 --- /dev/null +++ b/docs/doc_examples/2643b8c512cb3f3449259cdf498c6ab5.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + }, + }, + }, + { + product: { + terms: { + field: "product", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2646710ece0c4c843aebeacd370d0396.asciidoc b/docs/doc_examples/2646710ece0c4c843aebeacd370d0396.asciidoc new file mode 100644 index 000000000..0c73b9820 --- /dev/null +++ b/docs/doc_examples/2646710ece0c4c843aebeacd370d0396.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-byte-quantized-index", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 3, + index: true, + index_options: { + type: "int8_hnsw", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/268151ed1f0e12586e66e614b61d7981.asciidoc 
b/docs/doc_examples/268151ed1f0e12586e66e614b61d7981.asciidoc new file mode 100644 index 000000000..e80c1d982 --- /dev/null +++ b/docs/doc_examples/268151ed1f0e12586e66e614b61d7981.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_polygon: { + "person.location": { + points: ["drn5x1g8cu2y", "30, -80", "20, -90"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/26abfc49c238c2b5d259983ac38dbcee.asciidoc b/docs/doc_examples/26abfc49c238c2b5d259983ac38dbcee.asciidoc new file mode 100644 index 000000000..d6c0ddfef --- /dev/null +++ b/docs/doc_examples/26abfc49c238c2b5d259983ac38dbcee.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + simple_query_string: { + fields: ["body"], + quote_field_suffix: ".exact", + query: '"ski"', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/26bd8c027c82cd72c007c10fa66dc97f.asciidoc b/docs/doc_examples/26bd8c027c82cd72c007c10fa66dc97f.asciidoc new file mode 100644 index 000000000..79dc2c50d --- /dev/null +++ b/docs/doc_examples/26bd8c027c82cd72c007c10fa66dc97f.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", + indices: "*", + include_global_state: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/26d3ab748a855eb383e992eb1ff79662.asciidoc b/docs/doc_examples/26d3ab748a855eb383e992eb1ff79662.asciidoc new file mode 100644 index 000000000..151eb6f26 --- /dev/null +++ b/docs/doc_examples/26d3ab748a855eb383e992eb1ff79662.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.delete({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/26f237f9bf14e8b972cc33ff6aebefa2.asciidoc b/docs/doc_examples/26f237f9bf14e8b972cc33ff6aebefa2.asciidoc new file mode 100644 index 000000000..5982da74d --- /dev/null +++ b/docs/doc_examples/26f237f9bf14e8b972cc33ff6aebefa2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["stemmer"], + text: "fox running and jumping", +}); +console.log(response); +---- diff --git a/docs/doc_examples/270549e6b062228312c4e7a54a2c2209.asciidoc b/docs/doc_examples/270549e6b062228312c4e7a54a2c2209.asciidoc new file mode 100644 index 000000000..5aec6e804 --- /dev/null +++ b/docs/doc_examples/270549e6b062228312c4e7a54a2c2209.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.hotThreads(); +console.log(response); +---- diff 
--git a/docs/doc_examples/2716453454dbf9c6dde2ea6850a62214.asciidoc b/docs/doc_examples/2716453454dbf9c6dde2ea6850a62214.asciidoc new file mode 100644 index 000000000..925eec77b --- /dev/null +++ b/docs/doc_examples/2716453454dbf9c6dde2ea6850a62214.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "trips", + mappings: { + properties: { + distance: { + type: "long", + }, + route_length_miles: { + type: "alias", + path: "distance", + }, + transit_mode: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.search({ + query: { + range: { + route_length_miles: { + gte: 39, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/271fe0b452b62189505ce4a1d6f8bde1.asciidoc b/docs/doc_examples/271fe0b452b62189505ce4a1d6f8bde1.asciidoc new file mode 100644 index 000000000..449a9a930 --- /dev/null +++ b/docs/doc_examples/271fe0b452b62189505ce4a1d6f8bde1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + keyed: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2720e613d520ce352b62e990c2d283f7.asciidoc b/docs/doc_examples/2720e613d520ce352b62e990c2d283f7.asciidoc new file mode 100644 index 000000000..0f57c9f94 --- /dev/null +++ b/docs/doc_examples/2720e613d520ce352b62e990c2d283f7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.removePolicy({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc b/docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc new file mode 100644 index 000000000..5a65c9753 --- /dev/null +++ b/docs/doc_examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: ".ml-anomalies-custom-example", + block: "write", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2731a8577ad734a732d784c5dcb1225d.asciidoc b/docs/doc_examples/2731a8577ad734a732d784c5dcb1225d.asciidoc new file mode 100644 index 000000000..db47f4d61 --- /dev/null +++ b/docs/doc_examples/2731a8577ad734a732d784c5dcb1225d.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "norwegian_example", + settings: { + analysis: { + filter: { + norwegian_stop: { + type: "stop", + stopwords: "_norwegian_", + }, + norwegian_keywords: { + type: "keyword_marker", + keywords: ["eksempel"], + }, + norwegian_stemmer: { + type: "stemmer", + language: "norwegian", + }, + }, + analyzer: { + rebuilt_norwegian: { + tokenizer: "standard", + filter: [ + "lowercase", + "norwegian_stop", + "norwegian_keywords", + "norwegian_stemmer", + ], + }, + }, + }, + }, +}); 
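+// The rebuilt_norwegian analyzer above chains standard tokenization, lowercasing,
+// Norwegian stopword removal, keyword_marker protection for "eksempel", and
+// Norwegian stemming, mirroring the built-in norwegian analyzer's components.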
+console.log(response); +---- diff --git a/docs/doc_examples/27384266370152add76471dd0332a2f1.asciidoc b/docs/doc_examples/27384266370152add76471dd0332a2f1.asciidoc new file mode 100644 index 000000000..4f87cfbc8 --- /dev/null +++ b/docs/doc_examples/27384266370152add76471dd0332a2f1.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.updateTransform({ + transform_id: "simple-kibana-ecomm-pivot", + source: { + index: "kibana_sample_data_ecommerce", + query: { + term: { + "geoip.continent_name": { + value: "Asia", + }, + }, + }, + }, + description: "Maximum priced ecommerce data by customer_id in Asia", + dest: { + index: "kibana_sample_data_ecommerce_transform_v2", + pipeline: "add_timestamp_pipeline", + }, + frequency: "15m", + sync: { + time: { + field: "order_date", + delay: "120s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2740b69e7246ac6d1ad249382f21d534.asciidoc b/docs/doc_examples/2740b69e7246ac6d1ad249382f21d534.asciidoc new file mode 100644 index 000000000..4c09df81b --- /dev/null +++ b/docs/doc_examples/2740b69e7246ac6d1ad249382f21d534.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + "my-agg-metric-field": { + type: "aggregate_metric_double", + metrics: ["min", "max", "sum", "value_count"], + default_metric: "max", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/274feaaa727e0ddf61b3c0f093182839.asciidoc b/docs/doc_examples/274feaaa727e0ddf61b3c0f093182839.asciidoc new file mode 100644 index 000000000..322ec637b --- /dev/null +++ b/docs/doc_examples/274feaaa727e0ddf61b3c0f093182839.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + duration: { + type: "long", + script: { + source: + "\n emit(doc['measures.end'].value - doc['measures.start'].value);\n ", + }, + }, + }, + aggs: { + duration_stats: { + stats: { + field: "duration", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc b/docs/doc_examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc new file mode 100644 index 000000000..f6b5783c4 --- /dev/null +++ b/docs/doc_examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getIndexTemplate({ + name: "temp*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/27600d6a78623b69689d4218618e4278.asciidoc b/docs/doc_examples/27600d6a78623b69689d4218618e4278.asciidoc new file mode 100644 index 000000000..7565a7fce --- /dev/null +++ b/docs/doc_examples/27600d6a78623b69689d4218618e4278.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_index", + query: { + term: { + my_counter: 
18446744073709552000, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/276e5b71ff5c6879a9b819076ad82301.asciidoc b/docs/doc_examples/276e5b71ff5c6879a9b819076ad82301.asciidoc new file mode 100644 index 000000000..40494f10d --- /dev/null +++ b/docs/doc_examples/276e5b71ff5c6879a9b819076ad82301.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_dense_vector: { + type: "dense_vector", + index: false, + dims: 3, + }, + my_byte_dense_vector: { + type: "dense_vector", + index: false, + dims: 3, + element_type: "byte", + }, + status: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_dense_vector: [0.5, 10, 6], + my_byte_dense_vector: [0, 10, 6], + status: "published", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + my_dense_vector: [-0.5, 10, 10], + my_byte_dense_vector: [0, 10, 10], + status: "published", + }, +}); +console.log(response2); + +const response3 = await client.indices.refresh({ + index: "my-index-000001", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/277fefe2b623af61f8274f73efc97aed.asciidoc b/docs/doc_examples/277fefe2b623af61f8274f73efc97aed.asciidoc new file mode 100644 index 000000000..93632b514 --- /dev/null +++ b/docs/doc_examples/277fefe2b623af61f8274f73efc97aed.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.scriptsPainlessExecute({ + script: { + source: + '\n String response=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{response} %{size}\').extract(doc["message"].value)?.response;\n if (response != null) emit(Integer.parseInt(response)); \n ', + }, + context: "long_field", + context_setup: { + index: "my-index", + document: { + message: + '247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] "GET /images/hm_nbg.jpg HTTP/1.0" 304 0', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/278d5bfa1a01f91d5c84679ef1bca390.asciidoc b/docs/doc_examples/278d5bfa1a01f91d5c84679ef1bca390.asciidoc new file mode 100644 index 000000000..a8cdb26df --- /dev/null +++ b/docs/doc_examples/278d5bfa1a01f91d5c84679ef1bca390.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "products", + id: 1567, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2793fa53b7d269852aa74f6bf57e34dc.asciidoc b/docs/doc_examples/2793fa53b7d269852aa74f6bf57e34dc.asciidoc new file mode 100644 index 000000000..bce452553 --- /dev/null +++ b/docs/doc_examples/2793fa53b7d269852aa74f6bf57e34dc.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "ngram_custom_example", + settings: { + index: { + max_ngram_diff: 2, + }, + analysis: { + analyzer: { + default: { + tokenizer: "whitespace", + 
filter: ["3_5_grams"], + }, + }, + filter: { + "3_5_grams": { + type: "ngram", + min_gram: 3, + max_gram: 5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/279e2b29261971999923fdc658bba8ff.asciidoc b/docs/doc_examples/279e2b29261971999923fdc658bba8ff.asciidoc new file mode 100644 index 000000000..830bae7cd --- /dev/null +++ b/docs/doc_examples/279e2b29261971999923fdc658bba8ff.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + _source: { + includes: ["obj1.*", "obj2.*"], + excludes: ["*.description"], + }, + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/27f9f604e7a48799fa30529cbc0ff619.asciidoc b/docs/doc_examples/27f9f604e7a48799fa30529cbc0ff619.asciidoc new file mode 100644 index 000000000..74236de4d --- /dev/null +++ b/docs/doc_examples/27f9f604e7a48799fa30529cbc0ff619.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "delimited_payload_example", + settings: { + analysis: { + analyzer: { + whitespace_plus_delimited: { + tokenizer: "whitespace", + filter: ["plus_delimited"], + }, + }, + filter: { + plus_delimited: { + type: "delimited_payload", + delimiter: "+", + encoding: "int", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/282e9e845b606f29a5bba174ae4c4c4d.asciidoc b/docs/doc_examples/282e9e845b606f29a5bba174ae4c4c4d.asciidoc new file mode 100644 index 000000000..749b74ca4 --- /dev/null +++ b/docs/doc_examples/282e9e845b606f29a5bba174ae4c4c4d.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-restricted-api-key", + expiration: "7d", + role_descriptors: { + "my-restricted-role-descriptor": { + indices: [ + { + names: ["website-product-search"], + privileges: ["read"], + }, + ], + restriction: { + workflows: ["search_application_query"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/28415647fced5f983b42f8435332a625.asciidoc b/docs/doc_examples/28415647fced5f983b42f8435332a625.asciidoc new file mode 100644 index 000000000..bc11f9997 --- /dev/null +++ b/docs/doc_examples/28415647fced5f983b42f8435332a625.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + lowercase: { + field: "my-keyword-field", + }, + }, + ], + }, + docs: [ + { + _source: { + "my-keyword-field": "FOO", + }, + }, + { + _source: { + "my-keyword-field": "BAR", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/28543836b62b5622a402e6f7731d68f0.asciidoc b/docs/doc_examples/28543836b62b5622a402e6f7731d68f0.asciidoc new file mode 100644 index 000000000..c8cd51511 --- /dev/null +++ b/docs/doc_examples/28543836b62b5622a402e6f7731d68f0.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- 
+const response = await client.indices.downsample({ + index: ".ds-my-data-stream-2023.07.26-000001", + target_index: ".ds-my-data-stream-2023.07.26-000001-downsample", + config: { + fixed_interval: "1h", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2856a5ceff1861aa9a78099f1c517fe7.asciidoc b/docs/doc_examples/2856a5ceff1861aa9a78099f1c517fe7.asciidoc new file mode 100644 index 000000000..85471c3a9 --- /dev/null +++ b/docs/doc_examples/2856a5ceff1861aa9a78099f1c517fe7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: ".watches", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2864a24608b3ac59d21f604f8a31d131.asciidoc b/docs/doc_examples/2864a24608b3ac59d21f604f8a31d131.asciidoc new file mode 100644 index 000000000..7e2ac94ce --- /dev/null +++ b/docs/doc_examples/2864a24608b3ac59d21f604f8a31d131.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "jwt_role1", + refresh: "true", + cluster: ["manage"], + indices: [ + { + names: ["*"], + privileges: ["read"], + }, + ], + run_as: ["user123_runas"], + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc b/docs/doc_examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc new file mode 100644 index 000000000..4463693d5 --- /dev/null +++ b/docs/doc_examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "", + policy: { + roles: [], + deciders: { + fixed: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2879d7bf4167194b102bf97117327164.asciidoc b/docs/doc_examples/2879d7bf4167194b102bf97117327164.asciidoc new file mode 100644 index 000000000..c26a84b6d --- /dev/null +++ b/docs/doc_examples/2879d7bf4167194b102bf97117327164.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + char_filter: ["html_strip"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc b/docs/doc_examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc new file mode 100644 index 000000000..a9a3ee25f --- /dev/null +++ b/docs/doc_examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-image-index", + size: 3, + query: { + knn: { + field: "image-vector", + query_vector: [-5, 9, -12], + k: 10, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2891aa10ee9d474780adf94d5607f2db.asciidoc b/docs/doc_examples/2891aa10ee9d474780adf94d5607f2db.asciidoc index 755d0b234..ccbd72aca 100644 --- 
a/docs/doc_examples/2891aa10ee9d474780adf94d5607f2db.asciidoc +++ b/docs/doc_examples/2891aa10ee9d474780adf94d5607f2db.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.search({ - index: 'index_long,index_double', - body: { - sort: [ - { - field: { - numeric_type: 'double' - } - } - ] - } -}) -console.log(response) + index: "index_long,index_double", + sort: [ + { + field: { + numeric_type: "double", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/2897ccc2a3bf3d0cd89328ee4413fae5.asciidoc b/docs/doc_examples/2897ccc2a3bf3d0cd89328ee4413fae5.asciidoc new file mode 100644 index 000000000..d1ba94ce5 --- /dev/null +++ b/docs/doc_examples/2897ccc2a3bf3d0cd89328ee4413fae5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.get({ + id: "FklQYndoTDJ2VEFlMEVBTzFJMGhJVFEaLVlKYndBWWZSMUdicUc4WVlEaFl4ZzoxNTU=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2898cf033b5bdefdbe3723af850b25c5.asciidoc b/docs/doc_examples/2898cf033b5bdefdbe3723af850b25c5.asciidoc new file mode 100644 index 000000000..fdf8b3f45 --- /dev/null +++ b/docs/doc_examples/2898cf033b5bdefdbe3723af850b25c5.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "GET /search", + }, + }, + collapse: { + field: "user.id", + inner_hits: { + name: "most_recent", + size: 5, + sort: [ + { + "@timestamp": "desc", + }, + ], + }, + max_concurrent_group_searches: 4, + }, + sort: [ + { + "http.response.bytes": { + order: "desc", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/28aad2c5942bfb221c2bf1bbdc01658e.asciidoc b/docs/doc_examples/28aad2c5942bfb221c2bf1bbdc01658e.asciidoc deleted file mode 100644 index 5068af57e..000000000 --- a/docs/doc_examples/28aad2c5942bfb221c2bf1bbdc01658e.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'city.*' - ], - query: 'this AND that OR thus' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/28ac880057135e46b3b00c7f3976538c.asciidoc b/docs/doc_examples/28ac880057135e46b3b00c7f3976538c.asciidoc new file mode 100644 index 000000000..326307582 --- /dev/null +++ b/docs/doc_examples/28ac880057135e46b3b00c7f3976538c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "test", + settings: { + "index.routing.allocation.include._ip": "192.168.2.*", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/291110f4cac02f4610d0853f5800a70d.asciidoc b/docs/doc_examples/291110f4cac02f4610d0853f5800a70d.asciidoc new file mode 100644 index 000000000..bf8fdb708 --- /dev/null +++ b/docs/doc_examples/291110f4cac02f4610d0853f5800a70d.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- 
+const response = await client.search({ + index: "exams", + size: 0, + aggs: { + weighted_grade: { + weighted_avg: { + value: { + field: "grade", + missing: 2, + }, + weight: { + field: "weight", + missing: 3, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc b/docs/doc_examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc new file mode 100644 index 000000000..e8724553d --- /dev/null +++ b/docs/doc_examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + slices: 5, + refresh: "true", + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/295b3aaeb223612afdd991744dc9c873.asciidoc b/docs/doc_examples/295b3aaeb223612afdd991744dc9c873.asciidoc new file mode 100644 index 000000000..efdcdd845 --- /dev/null +++ b/docs/doc_examples/295b3aaeb223612afdd991744dc9c873.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my_test_scores_pipeline", + description: "Calculates the total test score", + processors: [ + { + script: { + source: "ctx.total_score = (ctx.math_score + ctx.verbal_score)", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc b/docs/doc_examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc new file mode 100644 index 000000000..7a6220c32 --- /dev/null +++ b/docs/doc_examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "azure-ai-studio-embeddings", + pipeline: "azure_ai_studio_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/29783e5de3a5f3c985cbf11094cf49a0.asciidoc b/docs/doc_examples/29783e5de3a5f3c985cbf11094cf49a0.asciidoc new file mode 100644 index 000000000..5a876f211 --- /dev/null +++ b/docs/doc_examples/29783e5de3a5f3c985cbf11094cf49a0.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["keyword_repeat", "stemmer", "remove_duplicates"], + text: "fox running and jumping", + explain: true, + attributes: "keyword", +}); +console.log(response); +---- diff --git a/docs/doc_examples/29824032d7d64512d17458fdd687b1f6.asciidoc b/docs/doc_examples/29824032d7d64512d17458fdd687b1f6.asciidoc new file mode 100644 index 000000000..2baa04ac3 --- /dev/null +++ b/docs/doc_examples/29824032d7d64512d17458fdd687b1f6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + parent_task_id: "oTUltX4IQMOUUVeiohTt8A:123", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/29953082744b7a36e437b392a6391c81.asciidoc b/docs/doc_examples/29953082744b7a36e437b392a6391c81.asciidoc new file mode 100644 index 000000000..42b500db9 --- /dev/null +++ b/docs/doc_examples/29953082744b7a36e437b392a6391c81.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + id: "my-search-template", + params: { + from: 20, + size: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc b/docs/doc_examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc new file mode 100644 index 000000000..2ace94737 --- /dev/null +++ b/docs/doc_examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "publications", + fields: "title", +}); +console.log(response); +---- diff --git a/docs/doc_examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc b/docs/doc_examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc new file mode 100644 index 000000000..b98cfe9a8 --- /dev/null +++ b/docs/doc_examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.migrateReindex({ + reindex: { + source: { + index: "my-data-stream", + }, + mode: "upgrade", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/29d9df958de292cec50daaf31844b573.asciidoc b/docs/doc_examples/29d9df958de292cec50daaf31844b573.asciidoc new file mode 100644 index 000000000..d0e279e5b --- /dev/null +++ b/docs/doc_examples/29d9df958de292cec50daaf31844b573.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "my-index-000001,my-index-000002", + fields: "message", +}); +console.log(response); +---- diff --git a/docs/doc_examples/29e002ab596bae58712eb048ac1768d1.asciidoc b/docs/doc_examples/29e002ab596bae58712eb048ac1768d1.asciidoc new file mode 100644 index 000000000..351d18c99 --- /dev/null +++ b/docs/doc_examples/29e002ab596bae58712eb048ac1768d1.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + routing: "xyz", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "You know for search!", + "user.id": "xyz", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a1de18774f9c68cafa169847832b2bc.asciidoc b/docs/doc_examples/2a1de18774f9c68cafa169847832b2bc.asciidoc deleted file mode 100644 index c0ae902fa..000000000 --- a/docs/doc_examples/2a1de18774f9c68cafa169847832b2bc.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - full_text: { - type: 'text' - } - } - } - } 
-}) -console.log(response) ----- - diff --git a/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc new file mode 100644 index 000000000..b3ca912d0 --- /dev/null +++ b/docs/doc_examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcLogout({ + token: + "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + refresh_token: "vLBPvmAB6KvwvJZr27cS", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc b/docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc new file mode 100644 index 000000000..07c3eb29d --- /dev/null +++ b/docs/doc_examples/2a21674c40f9b182a8944769d20b2357.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-rank-vectors-float", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "maxSimDotProduct(params.query_vector, 'my_vector')", + params: { + query_vector: [ + [0.5, 10, 6], + [-0.5, 10, 10], + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a247e36a86a373bcbf478ac9a588f44.asciidoc b/docs/doc_examples/2a247e36a86a373bcbf478ac9a588f44.asciidoc new file mode 100644 index 000000000..c4260ab6c --- /dev/null +++ b/docs/doc_examples/2a247e36a86a373bcbf478ac9a588f44.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + routing: "kimchy", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a287d213a812b98d8353c563a058cfc.asciidoc b/docs/doc_examples/2a287d213a812b98d8353c563a058cfc.asciidoc new file mode 100644 index 000000000..cf586f9f0 --- /dev/null +++ b/docs/doc_examples/2a287d213a812b98d8353c563a058cfc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_boxplot: { + boxplot: { + field: "load_time", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a44d254e6e32abe97515fd2eb34705d.asciidoc b/docs/doc_examples/2a44d254e6e32abe97515fd2eb34705d.asciidoc new file mode 100644 index 000000000..a7fd770cf --- /dev/null +++ b/docs/doc_examples/2a44d254e6e32abe97515fd2eb34705d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsync({ + id: "FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=", + wait_for_completion_timeout: "2s", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a47d11c6e19c9da5104e738359ea8a8.asciidoc b/docs/doc_examples/2a47d11c6e19c9da5104e738359ea8a8.asciidoc new file mode 
100644 index 000000000..186d8540d --- /dev/null +++ b/docs/doc_examples/2a47d11c6e19c9da5104e738359ea8a8.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.start(); +console.log(response); +---- diff --git a/docs/doc_examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc b/docs/doc_examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc new file mode 100644 index 000000000..c51f168fc --- /dev/null +++ b/docs/doc_examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + id: "index_compare", + source: { + index: ["index1", "index2"], + query: { + match_all: {}, + }, + }, + dest: { + index: "compare", + }, + pivot: { + group_by: { + "unique-id": { + terms: { + field: "", + }, + }, + }, + aggregations: { + compare: { + scripted_metric: { + map_script: "state.doc = new HashMap(params['_source'])", + combine_script: "return state", + reduce_script: + ' \n if (states.size() != 2) {\n return "count_mismatch"\n }\n if (states.get(0).equals(states.get(1))) {\n return "match"\n } else {\n return "mismatch"\n }\n ', + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc b/docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc new file mode 100644 index 000000000..93ccfa9d8 --- /dev/null +++ b/docs/doc_examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information including original binary", + processors: [ + { + attachment: { + field: "data", + remove_binary: false, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "attachment", + document: { + data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2a70194ebd2f01a3229a5092513676b3.asciidoc b/docs/doc_examples/2a70194ebd2f01a3229a5092513676b3.asciidoc new file mode 100644 index 000000000..27d8630e1 --- /dev/null +++ b/docs/doc_examples/2a70194ebd2f01a3229a5092513676b3.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + char_filter: ["my_custom_html_strip_char_filter"], + }, + }, + char_filter: { + my_custom_html_strip_char_filter: { + type: "html_strip", + escaped_tags: ["b"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a71e2d7f7179dd76183d30789046808.asciidoc b/docs/doc_examples/2a71e2d7f7179dd76183d30789046808.asciidoc new file mode 100644 index 000000000..d83750efc --- /dev/null +++ b/docs/doc_examples/2a71e2d7f7179dd76183d30789046808.asciidoc @@ 
-0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "mv", + refresh: "true", + operations: [ + { + index: {}, + }, + { + a: 1, + b: [2, 1], + }, + { + index: {}, + }, + { + a: 2, + b: 3, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a91e1fb8ad93a188fa9d77ec01bc431.asciidoc b/docs/doc_examples/2a91e1fb8ad93a188fa9d77ec01bc431.asciidoc new file mode 100644 index 000000000..4a364d79c --- /dev/null +++ b/docs/doc_examples/2a91e1fb8ad93a188fa9d77ec01bc431.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + comments: { + type: "nested", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + title: "Test title", + comments: [ + { + author: "kimchy", + number: 1, + }, + { + author: "nik9000", + number: 2, + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + query: { + nested: { + path: "comments", + query: { + match: { + "comments.number": 2, + }, + }, + inner_hits: {}, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2a9747bcfaf1f9491ebd410b3fcb6798.asciidoc b/docs/doc_examples/2a9747bcfaf1f9491ebd410b3fcb6798.asciidoc new file mode 100644 index 000000000..bf26b94bf --- /dev/null +++ b/docs/doc_examples/2a9747bcfaf1f9491ebd410b3fcb6798.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + query: "(new york city) OR (big apple)", + default_field: "content", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2a9d3119a9e26e29220be436b9382955.asciidoc b/docs/doc_examples/2a9d3119a9e26e29220be436b9382955.asciidoc new file mode 100644 index 000000000..e32079ad0 --- /dev/null +++ b/docs/doc_examples/2a9d3119a9e26e29220be436b9382955.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "mistral-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1024, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc b/docs/doc_examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc new file mode 100644 index 000000000..649f56d45 --- /dev/null +++ b/docs/doc_examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.deleteWatch({ + id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2abfe0d3f5593d23d2dfa608b1e2532a.asciidoc b/docs/doc_examples/2abfe0d3f5593d23d2dfa608b1e2532a.asciidoc new file mode 100644 index 000000000..3c45e67f9 --- /dev/null +++ 
b/docs/doc_examples/2abfe0d3f5593d23d2dfa608b1e2532a.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + user_name: { + terms: { + field: "user_name", + }, + }, + }, + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + order: "desc", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ac37c3c572170ded67f1d5a0c8151ab.asciidoc b/docs/doc_examples/2ac37c3c572170ded67f1d5a0c8151ab.asciidoc new file mode 100644 index 000000000..f3d8511aa --- /dev/null +++ b/docs/doc_examples/2ac37c3c572170ded67f1d5a0c8151ab.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + tiebreaker_field: "event.sequence", + query: + '\n process where process.name == "cmd.exe" and stringContains(process.executable, "System32")\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ac7efe3919ee0c7971f5d502f482662.asciidoc b/docs/doc_examples/2ac7efe3919ee0c7971f5d502f482662.asciidoc new file mode 100644 index 000000000..22b4c7525 --- /dev/null +++ b/docs/doc_examples/2ac7efe3919ee0c7971f5d502f482662.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: "1 / (1 + l1norm(params.queryVector, 'my_dense_vector'))", + params: { + queryVector: [4, 3.4, -0.2], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2acf75803494fef29f9ca70671aa6be1.asciidoc b/docs/doc_examples/2acf75803494fef29f9ca70671aa6be1.asciidoc new file mode 100644 index 000000000..d5715eeeb --- /dev/null +++ b/docs/doc_examples/2acf75803494fef29f9ca70671aa6be1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkDeleteRole({ + names: ["my_admin_role", "superuser"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ad35a13262f98574a48f88b4a838512.asciidoc b/docs/doc_examples/2ad35a13262f98574a48f88b4a838512.asciidoc new file mode 100644 index 000000000..b774c6457 --- /dev/null +++ b/docs/doc_examples/2ad35a13262f98574a48f88b4a838512.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "current_year", + id: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ade05fb3fb06a67df25e097dfadb045.asciidoc b/docs/doc_examples/2ade05fb3fb06a67df25e097dfadb045.asciidoc new file mode 100644 index 000000000..02eb444d9 --- /dev/null +++ b/docs/doc_examples/2ade05fb3fb06a67df25e097dfadb045.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2aec92bc31bc24bce58d983738f9e0fe.asciidoc b/docs/doc_examples/2aec92bc31bc24bce58d983738f9e0fe.asciidoc new file mode 100644 index 000000000..a2acce444 --- /dev/null +++ b/docs/doc_examples/2aec92bc31bc24bce58d983738f9e0fe.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + matrixstats: { + matrix_stats: { + fields: ["poverty", "income"], + missing: { + income: 50000, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2afc1231679898bd864d06679d9e951b.asciidoc b/docs/doc_examples/2afc1231679898bd864d06679d9e951b.asciidoc new file mode 100644 index 000000000..61ad3f6d7 --- /dev/null +++ b/docs/doc_examples/2afc1231679898bd864d06679d9e951b.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + histo: { + date_histogram: { + field: "date", + calendar_interval: "day", + }, + aggs: { + categories: { + terms: { + field: "category", + }, + }, + min_bucket_selector: { + bucket_selector: { + buckets_path: { + count: "categories._bucket_count", + }, + script: { + source: "params.count != 0", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2afd49985950cbcccf727fa858d00067.asciidoc b/docs/doc_examples/2afd49985950cbcccf727fa858d00067.asciidoc new file mode 100644 index 000000000..38aa159a8 --- /dev/null +++ b/docs/doc_examples/2afd49985950cbcccf727fa858d00067.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test-index", + query: { + match: { + my_field: "Which country is Paris in?", + }, + }, + highlight: { + fields: { + my_field: { + type: "semantic", + number_of_fragments: 2, + order: "score", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc new file mode 100644 index 000000000..32a8ae35c --- /dev/null +++ b/docs/doc_examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQueryGet({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + wait_for_completion_timeout: "30s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc b/docs/doc_examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc new file mode 100644 index 000000000..3e9c8d773 --- /dev/null +++ b/docs/doc_examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", + querystring: { + job_type: "full,incremental", + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/2b47be4b712147a429102aef386470ee.asciidoc b/docs/doc_examples/2b47be4b712147a429102aef386470ee.asciidoc new file mode 100644 index 000000000..87f348ef1 --- /dev/null +++ b/docs/doc_examples/2b47be4b712147a429102aef386470ee.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence by process.pid\n [process where process.name == "regsvr32.exe"]\n [library where dll.name == "scrobj.dll"]\n [network where true]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b59b014349d45bf894aca90b2b1fbe0.asciidoc b/docs/doc_examples/2b59b014349d45bf894aca90b2b1fbe0.asciidoc new file mode 100644 index 000000000..e5501b1a8 --- /dev/null +++ b/docs/doc_examples/2b59b014349d45bf894aca90b2b1fbe0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b5a5f8689f04d095fa86570130ee4d4.asciidoc b/docs/doc_examples/2b5a5f8689f04d095fa86570130ee4d4.asciidoc new file mode 100644 index 000000000..94216bc5f --- /dev/null +++ b/docs/doc_examples/2b5a5f8689f04d095fa86570130ee4d4.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_id: { + type: "keyword", + }, + my_join_field: { + type: "join", + relations: { + question: "answer", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc b/docs/doc_examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc new file mode 100644 index 000000000..bb81adede --- /dev/null +++ b/docs/doc_examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryApiKeys({ + size: 0, + query: { + bool: { + filter: { + term: { + invalidated: true, + }, + }, + }, + }, + aggs: { + invalidated_keys: { + composite: { + sources: [ + { + username: { + terms: { + field: "username", + }, + }, + }, + { + key_name: { + terms: { + field: "name", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2b7687e3d7c06824950e00618c297864.asciidoc b/docs/doc_examples/2b7687e3d7c06824950e00618c297864.asciidoc new file mode 100644 index 000000000..10757782b --- /dev/null +++ b/docs/doc_examples/2b7687e3d7c06824950e00618c297864.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveCluster({ + name: "my-index*,clust*:my-index*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ba15c066d55a9b26d49b09471151cb4.asciidoc b/docs/doc_examples/2ba15c066d55a9b26d49b09471151cb4.asciidoc new file mode 100644 index 000000000..792e02cbe --- /dev/null +++ 
b/docs/doc_examples/2ba15c066d55a9b26d49b09471151cb4.asciidoc @@ -0,0 +1,66 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "emails", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + accounts: ["hillary", "sidney"], + }, + { + index: { + _id: 2, + }, + }, + { + accounts: ["hillary", "donald"], + }, + { + index: { + _id: 3, + }, + }, + { + accounts: ["vladimir", "donald"], + }, + ], +}); +console.log(response); + +const response1 = await client.search({ + index: "emails", + size: 0, + aggs: { + interactions: { + adjacency_matrix: { + filters: { + grpA: { + terms: { + accounts: ["hillary", "sidney"], + }, + }, + grpB: { + terms: { + accounts: ["donald", "mitt"], + }, + }, + grpC: { + terms: { + accounts: ["vladimir", "nigel"], + }, + }, + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc b/docs/doc_examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc new file mode 100644 index 000000000..ae2742968 --- /dev/null +++ b/docs/doc_examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + sort: [ + { + post_date: { + order: "asc", + format: "strict_date_optional_time_nanos", + }, + }, + "user", + { + name: "desc", + }, + { + age: "desc", + }, + "_score", + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2bc1d52efec2076dc9fc2a3a2d90e8ab.asciidoc b/docs/doc_examples/2bc1d52efec2076dc9fc2a3a2d90e8ab.asciidoc new file mode 100644 index 000000000..18da0ad1c --- /dev/null +++ b/docs/doc_examples/2bc1d52efec2076dc9fc2a3a2d90e8ab.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_boxplot: { + boxplot: { + field: "load_time", + execution_hint: "high_accuracy", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2bc57cd3f32b59b0b44ca63b19cdfcc0.asciidoc b/docs/doc_examples/2bc57cd3f32b59b0b44ca63b19cdfcc0.asciidoc new file mode 100644 index 000000000..96f64fa4d --- /dev/null +++ b/docs/doc_examples/2bc57cd3f32b59b0b44ca63b19cdfcc0.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + knn: { + field: "image-vector", + query_vector: [1, 5, -20], + k: 5, + num_candidates: 50, + similarity: 36, + filter: { + term: { + "file-type": "png", + }, + }, + }, + fields: ["title"], + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc b/docs/doc_examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc new file mode 100644 index 000000000..9e9cb47ce --- /dev/null +++ b/docs/doc_examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc @@ -0,0 +1,77 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = 
await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + attributes: { + type: "passthrough", + priority: 10, + properties: { + id: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + attributes: { + id: "foo", + zone: 10, + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + bool: { + must: [ + { + match: { + id: "foo", + }, + }, + { + match: { + zone: 10, + }, + }, + ], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + bool: { + must: [ + { + match: { + "attributes.id": "foo", + }, + }, + { + match: { + "attributes.zone": 10, + }, + }, + ], + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/2c090fe7ec7b66b3f5c178d71c46323b.asciidoc b/docs/doc_examples/2c090fe7ec7b66b3f5c178d71c46323b.asciidoc new file mode 100644 index 000000000..edd0fc3cd --- /dev/null +++ b/docs/doc_examples/2c090fe7ec7b66b3f5c178d71c46323b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + metric: "fielddata", + human: "true", + fields: "my_join_field", +}); +console.log(response); + +const response1 = await client.nodes.stats({ + metric: "indices", + index_metric: "fielddata", + human: "true", + fields: "my_join_field", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc b/docs/doc_examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc new file mode 100644 index 000000000..02df5c874 --- /dev/null +++ b/docs/doc_examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.health({ + v: "true", + ts: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc b/docs/doc_examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc new file mode 100644 index 000000000..be41e40e4 --- /dev/null +++ b/docs/doc_examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.putSynonymRule({ + set_id: "my-synonyms-set", + rule_id: "test-1", + synonyms: "hello, hi, howdy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc b/docs/doc_examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc new file mode 100644 index 000000000..a3ecc0a0d --- /dev/null +++ b/docs/doc_examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "synonym_graph", + synonyms: ["dns, domain name system"], + }, + ], + text: "domain name system is fragile", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc 
b/docs/doc_examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc new file mode 100644 index 000000000..914a06153 --- /dev/null +++ b/docs/doc_examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: ".ds-my-data-stream-2023.07.26-000001", + block: "write", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc b/docs/doc_examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc new file mode 100644 index 000000000..b4a13111d --- /dev/null +++ b/docs/doc_examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + query: { + term: { + "user.id": "kimchy", + }, + }, + max_docs: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c44657adf550b8ade5cf5334106d38b.asciidoc b/docs/doc_examples/2c44657adf550b8ade5cf5334106d38b.asciidoc new file mode 100644 index 000000000..60ba299c3 --- /dev/null +++ b/docs/doc_examples/2c44657adf550b8ade5cf5334106d38b.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + "http.clientip": { + type: "ip", + script: + "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n ", + }, + }, + query: { + match: { + "http.clientip": "40.135.0.0", + }, + }, + fields: ["http.clientip"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc b/docs/doc_examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc new file mode 100644 index 000000000..8289c7259 --- /dev/null +++ b/docs/doc_examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc @@ -0,0 +1,78 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info(); +console.log(response); + +const response1 = await client.nodes.info({ + node_id: "_all", +}); +console.log(response1); + +const response2 = await client.nodes.info({ + node_id: "_local", +}); +console.log(response2); + +const response3 = await client.nodes.info({ + node_id: "_master", +}); +console.log(response3); + +const response4 = await client.nodes.info({ + node_id: "node_name_goes_here", +}); +console.log(response4); + +const response5 = await client.nodes.info({ + node_id: "node_name_goes_*", +}); +console.log(response5); + +const response6 = await client.nodes.info({ + node_id: "10.0.0.3,10.0.0.4", +}); +console.log(response6); + +const response7 = await client.nodes.info({ + node_id: "10.0.0.*", +}); +console.log(response7); + +const response8 = await client.nodes.info({ + node_id: "_all,master:false", +}); +console.log(response8); + +const response9 = await client.nodes.info({ + node_id: "data:true,ingest:true", +}); +console.log(response9); + +const response10 = await client.nodes.info({ + node_id: "coordinating_only:true", +}); +console.log(response10); + +const response11 = await client.nodes.info({ + 
node_id: "master:true,voting_only:false", +}); +console.log(response11); + +const response12 = await client.nodes.info({ + node_id: "rack:2", +}); +console.log(response12); + +const response13 = await client.nodes.info({ + node_id: "ra*:2", +}); +console.log(response13); + +const response14 = await client.nodes.info({ + node_id: "ra*:2*", +}); +console.log(response14); +---- diff --git a/docs/doc_examples/2c86840a46242a38cf82024a9321be46.asciidoc b/docs/doc_examples/2c86840a46242a38cf82024a9321be46.asciidoc new file mode 100644 index 000000000..3bda37c4b --- /dev/null +++ b/docs/doc_examples/2c86840a46242a38cf82024a9321be46.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-explicit-mappings-books", + mappings: { + dynamic: false, + properties: { + name: { + type: "text", + }, + author: { + type: "text", + }, + release_date: { + type: "date", + format: "yyyy-MM-dd", + }, + page_count: { + type: "integer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ceded6ee764adf1aaaac0a1cd25ed5f.asciidoc b/docs/doc_examples/2ceded6ee764adf1aaaac0a1cd25ed5f.asciidoc new file mode 100644 index 000000000..dd5e94075 --- /dev/null +++ b/docs/doc_examples/2ceded6ee764adf1aaaac0a1cd25ed5f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + v: "true", + health: "red", + h: "index,status,health", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d01a9e5550b525496757f1bd7f0e706.asciidoc b/docs/doc_examples/2d01a9e5550b525496757f1bd7f0e706.asciidoc new file mode 100644 index 000000000..865aa0e5d --- /dev/null +++ b/docs/doc_examples/2d01a9e5550b525496757f1bd7f0e706.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + timeout: "5m", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d0244c020075595acb625aa5ba8f455.asciidoc b/docs/doc_examples/2d0244c020075595acb625aa5ba8f455.asciidoc new file mode 100644 index 000000000..e7a2117c4 --- /dev/null +++ b/docs/doc_examples/2d0244c020075595acb625aa5ba8f455.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "idx_keep", + id: 1, + document: { + path: { + to: [ + { + foo: [3, 2, 1], + }, + { + foo: [30, 20, 10], + }, + ], + bar: "baz", + }, + ids: [200, 100, 300, 100], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc b/docs/doc_examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc new file mode 100644 index 000000000..4deb98ab1 --- /dev/null +++ b/docs/doc_examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { 
+ match_phrase: { + message: { + query: "this is a test", + analyzer: "my_analyzer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d2f5ec97aa34ff7822a6a1ed08ef335.asciidoc b/docs/doc_examples/2d2f5ec97aa34ff7822a6a1ed08ef335.asciidoc new file mode 100644 index 000000000..4a0b8c958 --- /dev/null +++ b/docs/doc_examples/2d2f5ec97aa34ff7822a6a1ed08ef335.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "test", + refresh: "true", + operations: [ + { + index: { + _index: "test1", + }, + }, + { + s: 1, + m: 3.1415, + }, + { + index: { + _index: "test1", + }, + }, + { + s: 2, + m: 1, + }, + { + index: { + _index: "test2", + }, + }, + { + s: 3.1, + m: 2.71828, + }, + ], +}); +console.log(response); + +const response1 = await client.search({ + index: "test*", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + s: "asc", + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/2d37b02cbf6d30ae11bf239a54ec9423.asciidoc b/docs/doc_examples/2d37b02cbf6d30ae11bf239a54ec9423.asciidoc new file mode 100644 index 000000000..3c73b9c1a --- /dev/null +++ b/docs/doc_examples/2d37b02cbf6d30ae11bf239a54ec9423.asciidoc @@ -0,0 +1,85 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + { + "@timestamp": 1516729294000, + model_number: "QVKC92Q", + measures: { + voltage: "5.2", + start: "300", + end: "8675309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516642894000, + model_number: "QVKC92Q", + measures: { + voltage: "5.8", + start: "300", + end: "8675309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516556494000, + model_number: "QVKC92Q", + measures: { + voltage: "5.1", + start: "300", + end: "8675309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516470094000, + model_number: "QVKC92Q", + measures: { + voltage: "5.6", + start: "300", + end: "8675309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516383694000, + model_number: "HG537PU", + measures: { + voltage: "4.2", + start: "400", + end: "8625309", + }, + }, + { + index: {}, + }, + { + "@timestamp": 1516297294000, + model_number: "HG537PU", + measures: { + voltage: "4.0", + start: "400", + end: "8625309", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc b/docs/doc_examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc new file mode 100644 index 000000000..6beaa8b94 --- /dev/null +++ b/docs/doc_examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + query: { + query_string: { + query: "@timestamp:foo", + lenient: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc b/docs/doc_examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc new file mode 100644 index 000000000..fbb1460ce --- /dev/null +++ 
b/docs/doc_examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.transforms({ + v: "true", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2d9b30acd6b5683f39d53494c0dd779c.asciidoc b/docs/doc_examples/2d9b30acd6b5683f39d53494c0dd779c.asciidoc new file mode 100644 index 000000000..df4dd09ce --- /dev/null +++ b/docs/doc_examples/2d9b30acd6b5683f39d53494c0dd779c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.health(); +console.log(response); + +const response1 = await client.cat.recovery(); +console.log(response1); +---- diff --git a/docs/doc_examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc b/docs/doc_examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc new file mode 100644 index 000000000..48f3e7445 --- /dev/null +++ b/docs/doc_examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "dsl-data-stream", + data_retention: "7d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc b/docs/doc_examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc new file mode 100644 index 000000000..fdb1ef5bf --- /dev/null +++ b/docs/doc_examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + match: { + query: "hot porridge", + filter: { + script: { + source: + "interval.start > 10 && interval.end < 20 && interval.gaps == 0", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc b/docs/doc_examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc new file mode 100644 index 000000000..eb4f6acad --- /dev/null +++ b/docs/doc_examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "hourly-snapshots", + schedule: "1h", + name: "", + repository: "my_repository", + config: { + indices: ["data-*", "important"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e364833626c9790c042c8f006fcc999.asciidoc b/docs/doc_examples/2e364833626c9790c042c8f006fcc999.asciidoc new file mode 100644 index 000000000..b791428b4 --- /dev/null +++ b/docs/doc_examples/2e364833626c9790c042c8f006fcc999.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "multiplexer_example", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["my_multiplexer"], + }, + }, + filter: { + my_multiplexer: { + type: "multiplexer", + filters: ["lowercase", 
"lowercase, porter_stem"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e36fe22051a47e052e349854d9948b9.asciidoc b/docs/doc_examples/2e36fe22051a47e052e349854d9948b9.asciidoc new file mode 100644 index 000000000..e29c93a48 --- /dev/null +++ b/docs/doc_examples/2e36fe22051a47e052e349854d9948b9.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.explain({ + index: "my-index-000001", + id: 0, + q: "message:search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e3d1b293da93f2a9ecfc26786ec28d6.asciidoc b/docs/doc_examples/2e3d1b293da93f2a9ecfc26786ec28d6.asciidoc new file mode 100644 index 000000000..b4e87a4e8 --- /dev/null +++ b/docs/doc_examples/2e3d1b293da93f2a9ecfc26786ec28d6.asciidoc @@ -0,0 +1,154 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + template: { + settings: { + index: { + mode: "time_series", + routing_path: [ + "kubernetes.namespace", + "kubernetes.host", + "kubernetes.node", + "kubernetes.pod", + ], + number_of_replicas: 0, + number_of_shards: 2, + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + kubernetes: { + properties: { + container: { + properties: { + cpu: { + properties: { + usage: { + properties: { + core: { + properties: { + ns: { + type: "long", + }, + }, + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + nanocores: { + type: "long", + time_series_metric: "gauge", + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + }, + }, + memory: { + properties: { + available: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + majorpagefaults: { + type: "long", + }, + pagefaults: { + type: "long", + time_series_metric: "gauge", + }, + rss: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + usage: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + workingset: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + }, + }, + name: { + type: "keyword", + }, + start_time: { + type: "date", + }, + }, + }, + host: { + type: "keyword", + time_series_dimension: true, + }, + namespace: { + type: "keyword", + time_series_dimension: true, + }, + node: { + type: "keyword", + time_series_dimension: true, + }, + pod: { + type: "keyword", + time_series_dimension: true, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc b/docs/doc_examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc new file mode 100644 index 000000000..e19fb83da --- /dev/null +++ b/docs/doc_examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + retriever: { + rule: { + match_criteria: { 
+ query_string: "puggles", + user_country: "us", + }, + ruleset_ids: ["my-ruleset"], + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "pugs", + }, + }, + }, + }, + { + standard: { + query: { + query_string: { + query: "puggles", + }, + }, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc b/docs/doc_examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc new file mode 100644 index 000000000..ab5a2cc20 --- /dev/null +++ b/docs/doc_examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.split({ + index: "my_source_index", + target: "my_target_index", + settings: { + "index.number_of_shards": 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e7f4b9be999422a12abb680572b13c8.asciidoc b/docs/doc_examples/2e7f4b9be999422a12abb680572b13c8.asciidoc new file mode 100644 index 000000000..c7cafa2e2 --- /dev/null +++ b/docs/doc_examples/2e7f4b9be999422a12abb680572b13c8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.getLifecycle({ + name: "my_policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e847378ba26aa64d40186b6e3e6a1da.asciidoc b/docs/doc_examples/2e847378ba26aa64d40186b6e3e6a1da.asciidoc new file mode 100644 index 000000000..4109e8fef --- /dev/null +++ b/docs/doc_examples/2e847378ba26aa64d40186b6e3e6a1da.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_index", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: + "field('my_counter').asBigInteger(BigInteger.ZERO).floatValue()", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2e93eaaebf75fa4a2451e8a76ffa9f20.asciidoc b/docs/doc_examples/2e93eaaebf75fa4a2451e8a76ffa9f20.asciidoc new file mode 100644 index 000000000..b75698f7e --- /dev/null +++ b/docs/doc_examples/2e93eaaebf75fa4a2451e8a76ffa9f20.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + mappings: { + properties: { + message: { + type: "text", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ebcdd00ccbf26b4c8e6d9c80dfb3d55.asciidoc b/docs/doc_examples/2ebcdd00ccbf26b4c8e6d9c80dfb3d55.asciidoc new file mode 100644 index 000000000..6689c3417 --- /dev/null +++ b/docs/doc_examples/2ebcdd00ccbf26b4c8e6d9c80dfb3d55.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "linestring", + coordinates: [ + [-377.03653, 389.897676], + [-377.009051, 389.889939], + ], + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/2ec8d757188349a4630e120ba2c98c3b.asciidoc b/docs/doc_examples/2ec8d757188349a4630e120ba2c98c3b.asciidoc new file mode 100644 index 000000000..a11d42c1f --- /dev/null +++ b/docs/doc_examples/2ec8d757188349a4630e120ba2c98c3b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "pattern_replace", + pattern: "(dog)", + replacement: "watch$1", + }, + ], + text: "foxes jump lazy dogs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc b/docs/doc_examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc new file mode 100644 index 000000000..26c954c49 --- /dev/null +++ b/docs/doc_examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "my-index-2099.05.06-000001", + alias: "my-alias", + routing: "1", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc b/docs/doc_examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc new file mode 100644 index 000000000..1ac2d8ecb --- /dev/null +++ b/docs/doc_examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_index_analyzer: { + type: "custom", + tokenizer: "standard", + filter: ["my_custom_word_delimiter_graph_filter", "flatten_graph"], + }, + }, + filter: { + my_custom_word_delimiter_graph_filter: { + type: "word_delimiter_graph", + catenate_all: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2eebaeb3983a04ef7a9201c1f4d40dc1.asciidoc b/docs/doc_examples/2eebaeb3983a04ef7a9201c1f4d40dc1.asciidoc new file mode 100644 index 000000000..380d5aa17 --- /dev/null +++ b/docs/doc_examples/2eebaeb3983a04ef7a9201c1f4d40dc1.asciidoc @@ -0,0 +1,68 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index", + refresh: "true", + operations: [ + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:17-05:00", + message: + '40.135.0.0 - - [30/Apr/2020:14:30:17 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:53-05:00", + message: + '232.0.0.0 - - [30/Apr/2020:14:30:53 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:12-05:00", + message: + '26.1.0.0 - - [30/Apr/2020:14:31:12 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:19-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:19 -0500] "GET /french/splash_inet.html HTTP/1.0" 200 3781', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:22-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] "GET 
/images/hm_nbg.jpg HTTP/1.0" 304 0', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:27-05:00", + message: + '252.0.0.0 - - [30/Apr/2020:14:31:27 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:28-05:00", + message: "not a valid apache log", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f0b2181c434a879a23b4643bdd92575.asciidoc b/docs/doc_examples/2f0b2181c434a879a23b4643bdd92575.asciidoc new file mode 100644 index 000000000..a3d808bf4 --- /dev/null +++ b/docs/doc_examples/2f0b2181c434a879a23b4643bdd92575.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); + +const response1 = await client.indices.getSettings({ + index: "_all", +}); +console.log(response1); + +const response2 = await client.indices.getSettings({ + index: "log_2099_*", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2f195eeb93229e40c4d8f1a6ab4a358c.asciidoc b/docs/doc_examples/2f195eeb93229e40c4d8f1a6ab4a358c.asciidoc new file mode 100644 index 000000000..dbc53e814 --- /dev/null +++ b/docs/doc_examples/2f195eeb93229e40c4d8f1a6ab4a358c.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + fingerprint: { + fields: ["user"], + }, + }, + ], + }, + docs: [ + { + _source: { + user: { + last_name: "Smith", + first_name: "John", + date_of_birth: "1980-01-15", + is_active: true, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc b/docs/doc_examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc new file mode 100644 index 000000000..3fa52a230 --- /dev/null +++ b/docs/doc_examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.deleteAutoFollowPattern({ + name: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc b/docs/doc_examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc new file mode 100644 index 000000000..c37e742e2 --- /dev/null +++ b/docs/doc_examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f4a55dfeba8851b306ef9c1b216ef54.asciidoc b/docs/doc_examples/2f4a55dfeba8851b306ef9c1b216ef54.asciidoc new file mode 100644 index 000000000..f5fb24488 --- /dev/null +++ b/docs/doc_examples/2f4a55dfeba8851b306ef9c1b216ef54.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "bug_reports", + query: { + term: { + "labels.release": "v1.3.0", + }, + }, +}); +console.log(response); +---- diff 
--git a/docs/doc_examples/2f4e28c81db47547ad39d0926babab12.asciidoc b/docs/doc_examples/2f4e28c81db47547ad39d0926babab12.asciidoc new file mode 100644 index 000000000..6313ca3ea --- /dev/null +++ b/docs/doc_examples/2f4e28c81db47547ad39d0926babab12.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "estonian_example", + settings: { + analysis: { + filter: { + estonian_stop: { + type: "stop", + stopwords: "_estonian_", + }, + estonian_keywords: { + type: "keyword_marker", + keywords: ["näide"], + }, + estonian_stemmer: { + type: "stemmer", + language: "estonian", + }, + }, + analyzer: { + rebuilt_estonian: { + tokenizer: "standard", + filter: [ + "lowercase", + "estonian_stop", + "estonian_keywords", + "estonian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc b/docs/doc_examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc new file mode 100644 index 000000000..e1edb8658 --- /dev/null +++ b/docs/doc_examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test-index", + mappings: { + properties: { + source_field: { + type: "text", + fields: { + infer_field: { + type: "semantic_text", + inference_id: ".elser-2-elasticsearch", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc b/docs/doc_examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc new file mode 100644 index 000000000..e2201ac00 --- /dev/null +++ b/docs/doc_examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + status_code: { + type: "keyword", + }, + session_id: { + type: "keyword", + doc_values: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc b/docs/doc_examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc new file mode 100644 index 000000000..79d2f713b --- /dev/null +++ b/docs/doc_examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc b/docs/doc_examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc new file mode 100644 index 000000000..8f98c98a9 --- /dev/null +++ b/docs/doc_examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: "my-index", + dest: "my-new-index", + create_from: { + settings_override: { + index: { + "blocks.write": null, + "blocks.read": null, + "blocks.read_only": null, + 
"blocks.read_only_allow_delete": null, + "blocks.metadata": null, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fa45d74ba9933188c4728f8a9e5372c.asciidoc b/docs/doc_examples/2fa45d74ba9933188c4728f8a9e5372c.asciidoc new file mode 100644 index 000000000..c9f316a8c --- /dev/null +++ b/docs/doc_examples/2fa45d74ba9933188c4728f8a9e5372c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "action.auto_create_index": "my-index-000001,index10,-index1*,+ind*", + }, +}); +console.log(response); + +const response1 = await client.cluster.putSettings({ + persistent: { + "action.auto_create_index": "false", + }, +}); +console.log(response1); + +const response2 = await client.cluster.putSettings({ + persistent: { + "action.auto_create_index": "true", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/2fa7ded8515b32f26c54394ea598f573.asciidoc b/docs/doc_examples/2fa7ded8515b32f26c54394ea598f573.asciidoc new file mode 100644 index 000000000..de58c2dc4 --- /dev/null +++ b/docs/doc_examples/2fa7ded8515b32f26c54394ea598f573.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["te*", "bar*"], + template: { + settings: { + number_of_shards: 1, + }, + mappings: { + _source: { + enabled: true, + }, + properties: { + host_name: { + type: "keyword", + }, + created_at: { + type: "date", + format: "EEE MMM dd HH:mm:ss Z yyyy", + }, + }, + }, + aliases: { + mydata: {}, + }, + }, + priority: 500, + composed_of: ["component_template1", "runtime_component_template"], + version: 3, + _meta: { + description: "my custom", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fc2c790a85be29bbcba50bdde1493f4.asciidoc b/docs/doc_examples/2fc2c790a85be29bbcba50bdde1493f4.asciidoc new file mode 100644 index 000000000..e46f4517b --- /dev/null +++ b/docs/doc_examples/2fc2c790a85be29bbcba50bdde1493f4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc b/docs/doc_examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc new file mode 100644 index 000000000..9235b4fab --- /dev/null +++ b/docs/doc_examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + transient: { + "indices.recovery.*": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fd0b3c132b46aa34cc9d92dd2d4bc85.asciidoc b/docs/doc_examples/2fd0b3c132b46aa34cc9d92dd2d4bc85.asciidoc new file mode 100644 index 000000000..b952b6ef8 --- /dev/null +++ b/docs/doc_examples/2fd0b3c132b46aa34cc9d92dd2d4bc85.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "common_grams", + common_words: ["is", "the"], + }, + ], + text: "the quick fox is brown", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc b/docs/doc_examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc new file mode 100644 index 000000000..b0e232648 --- /dev/null +++ b/docs/doc_examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + dynamic: false, + properties: { + text: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + refresh: "true", + document: { + text: "words words", + flag: "bar", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "test", + refresh: "true", + document: { + text: "words words", + flag: "foo", + }, +}); +console.log(response2); + +const response3 = await client.indices.putMapping({ + index: "test", + properties: { + text: { + type: "text", + }, + flag: { + type: "text", + analyzer: "keyword", + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/2fea3e324939cc7e9c396964aeee7111.asciidoc b/docs/doc_examples/2fea3e324939cc7e9c396964aeee7111.asciidoc new file mode 100644 index 000000000..f92d041ea --- /dev/null +++ b/docs/doc_examples/2fea3e324939cc7e9c396964aeee7111.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + query: "to be or not to be", + operator: "and", + zero_terms_query: "all", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc b/docs/doc_examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc new file mode 100644 index 000000000..83ce4807d --- /dev/null +++ b/docs/doc_examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/2ffa953b29ed0156c9e610daf66b8e48.asciidoc b/docs/doc_examples/2ffa953b29ed0156c9e610daf66b8e48.asciidoc new file mode 100644 index 000000000..2f048f0a9 --- /dev/null +++ b/docs/doc_examples/2ffa953b29ed0156c9e610daf66b8e48.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.explainLifecycle({ + index: "timeseries-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/300576666769b78fa6fa26b232837f81.asciidoc b/docs/doc_examples/300576666769b78fa6fa26b232837f81.asciidoc new file mode 100644 index 000000000..1cd460618 --- /dev/null +++ b/docs/doc_examples/300576666769b78fa6fa26b232837f81.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.autoscaling.getAutoscalingCapacity(); +console.log(response); +---- diff --git a/docs/doc_examples/305c4cfb2ad4b58b4c319ffbf32336cc.asciidoc b/docs/doc_examples/305c4cfb2ad4b58b4c319ffbf32336cc.asciidoc new file mode 100644 index 000000000..ab2ae2bdc --- /dev/null +++ b/docs/doc_examples/305c4cfb2ad4b58b4c319ffbf32336cc.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + script_fields: { + my_doubled_field: { + script: { + lang: "painless", + source: "doc['my_field'].value * params.get('multiplier');", + params: { + multiplier: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3082ae0c3ecdc61808103214631b40c6.asciidoc b/docs/doc_examples/3082ae0c3ecdc61808103214631b40c6.asciidoc new file mode 100644 index 000000000..22c1eb540 --- /dev/null +++ b/docs/doc_examples/3082ae0c3ecdc61808103214631b40c6.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + avg_monthly_sales: { + avg_bucket: { + buckets_path: "sales_per_month>sales", + gap_policy: "skip", + format: "#,##0.00;(#,##0.00)", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/309f0721145b5c656338a02459c3ff1e.asciidoc b/docs/doc_examples/309f0721145b5c656338a02459c3ff1e.asciidoc new file mode 100644 index 000000000..1c5d0dca3 --- /dev/null +++ b/docs/doc_examples/309f0721145b5c656338a02459c3ff1e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + saturation: { + pivot: 8, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/30abc76a39e551f4b52c65002bb6405d.asciidoc b/docs/doc_examples/30abc76a39e551f4b52c65002bb6405d.asciidoc new file mode 100644 index 000000000..0b8fe7b56 --- /dev/null +++ b/docs/doc_examples/30abc76a39e551f4b52c65002bb6405d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + username: "myuser", + realm_name: "native1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/30bd3c0785f3df4795684754adeb5ecb.asciidoc b/docs/doc_examples/30bd3c0785f3df4795684754adeb5ecb.asciidoc new file mode 100644 index 000000000..96086e079 --- /dev/null +++ b/docs/doc_examples/30bd3c0785f3df4795684754adeb5ecb.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + message: "{{query_string}}", + }, + }, + from: "{{from}}", + size: "{{size}}", + }, + params: { + query_string: "hello world", + from: 20, + size: 10, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/30d051f534aeb884176eedb2c11dac85.asciidoc b/docs/doc_examples/30d051f534aeb884176eedb2c11dac85.asciidoc new file mode 100644 index 000000000..0ea31b07a --- /dev/null +++ b/docs/doc_examples/30d051f534aeb884176eedb2c11dac85.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "my-elastic-rerank", + inference_config: { + service: "elasticsearch", + service_settings: { + model_id: ".rerank-v1", + num_threads: 1, + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 1, + max_number_of_allocations: 4, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/30db2702dd0071c72a090b8311d0db09.asciidoc b/docs/doc_examples/30db2702dd0071c72a090b8311d0db09.asciidoc new file mode 100644 index 000000000..153beae37 --- /dev/null +++ b/docs/doc_examples/30db2702dd0071c72a090b8311d0db09.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + query: { + match: { + body: "elections", + }, + }, + aggs: { + top_sites: { + terms: { + field: "domain", + order: { + top_hit: "desc", + }, + }, + aggs: { + top_tags_hits: { + top_hits: {}, + }, + top_hit: { + max: { + script: { + source: "_score", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc b/docs/doc_examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc new file mode 100644 index 000000000..12da5e39e --- /dev/null +++ b/docs/doc_examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "my-index-000002", +}); +console.log(response1); + +const response2 = await client.indices.putMapping({ + index: "my-index-000001,my-index-000002", + properties: { + user: { + properties: { + name: { + type: "keyword", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/30fa37c9575fe81a0ea7c12cfc08e277.asciidoc b/docs/doc_examples/30fa37c9575fe81a0ea7c12cfc08e277.asciidoc new file mode 100644 index 000000000..04f772fe1 --- /dev/null +++ b/docs/doc_examples/30fa37c9575fe81a0ea7c12cfc08e277.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bad_example_index", + mappings: { + properties: { + field_1: { + type: "text", + copy_to: "field_2", + }, + field_2: { + type: "text", + copy_to: "field_3", + }, + field_3: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc b/docs/doc_examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc new file mode 100644 index 000000000..34cfe5d2f --- /dev/null +++ b/docs/doc_examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "azure_ai_studio_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "azure_ai_studio_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/311c4b632a29b9ead63b02d01f10096b.asciidoc b/docs/doc_examples/311c4b632a29b9ead63b02d01f10096b.asciidoc deleted file mode 100644 index cecf117ec..000000000 --- a/docs/doc_examples/311c4b632a29b9ead63b02d01f10096b.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'customer', - id: '1', - body: { - name: 'John Doe' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/3166455372f2d96622caff076e91ebe7.asciidoc b/docs/doc_examples/3166455372f2d96622caff076e91ebe7.asciidoc new file mode 100644 index 000000000..01c9127eb --- /dev/null +++ b/docs/doc_examples/3166455372f2d96622caff076e91ebe7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + percolate: { + field: "query", + index: "my-index-000001", + id: "2", + version: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/316cd43feb3b86396483903af1a048b1.asciidoc b/docs/doc_examples/316cd43feb3b86396483903af1a048b1.asciidoc new file mode 100644 index 000000000..524e00979 --- /dev/null +++ b/docs/doc_examples/316cd43feb3b86396483903af1a048b1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sale_date: { + date_histogram: { + field: "date", + calendar_interval: "year", + missing: "2000/01/01", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3182f26c61fbe5cf89400804533d5ed2.asciidoc b/docs/doc_examples/3182f26c61fbe5cf89400804533d5ed2.asciidoc new file mode 100644 index 000000000..321f477e4 --- /dev/null +++ b/docs/doc_examples/3182f26c61fbe5cf89400804533d5ed2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + id: "my-search-template", + params: { + query_string: "My string", + text_fields: [ + { + user_name: "John", + }, + { + user_name: "kimchy", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc b/docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc new file mode 100644 index 000000000..e61007e9c --- /dev/null +++ b/docs/doc_examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-*", + query: { + bool: { + must: [ + { + match: { + "user.id": "kimchy", + }, + }, + ], + must_not: [ + { + terms: { + _index: 
["my-index-01"], + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/318e209cc4d6f306e65cb2f5598a50b1.asciidoc b/docs/doc_examples/318e209cc4d6f306e65cb2f5598a50b1.asciidoc new file mode 100644 index 000000000..0e2f4d81e --- /dev/null +++ b/docs/doc_examples/318e209cc4d6f306e65cb2f5598a50b1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "LineString", + coordinates: [ + [-77.03653, 38.897676], + [-77.009051, 38.889939], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31a79a57b242713edec6795599ba0d5d.asciidoc b/docs/doc_examples/31a79a57b242713edec6795599ba0d5d.asciidoc new file mode 100644 index 000000000..7d8fdd9e4 --- /dev/null +++ b/docs/doc_examples/31a79a57b242713edec6795599ba0d5d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + my_tokens: { + type: "sparse_vector", + }, + my_text_field: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31ab4ec26176857280af630bf84a2823.asciidoc b/docs/doc_examples/31ab4ec26176857280af630bf84a2823.asciidoc new file mode 100644 index 000000000..77c3e3ee3 --- /dev/null +++ b/docs/doc_examples/31ab4ec26176857280af630bf84a2823.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlServiceProviderMetadata({ + realm_name: "saml1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/31ac1b68dc7c26a1d37350be47ae9381.asciidoc b/docs/doc_examples/31ac1b68dc7c26a1d37350be47ae9381.asciidoc new file mode 100644 index 000000000..cf057e9c6 --- /dev/null +++ b/docs/doc_examples/31ac1b68dc7c26a1d37350be47ae9381.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "music", + mappings: { + properties: { + suggest: { + type: "completion", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31aed390c30bd4f42a5c56253695e53f.asciidoc b/docs/doc_examples/31aed390c30bd4f42a5c56253695e53f.asciidoc new file mode 100644 index 000000000..4d03f9146 --- /dev/null +++ b/docs/doc_examples/31aed390c30bd4f42a5c56253695e53f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "whitespace_example", + settings: { + analysis: { + analyzer: { + rebuilt_whitespace: { + tokenizer: "whitespace", + filter: [], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/31f4400716500149cccbc19aa06bff66.asciidoc b/docs/doc_examples/31f4400716500149cccbc19aa06bff66.asciidoc new file mode 100644 index 000000000..4cd289777 --- /dev/null +++ b/docs/doc_examples/31f4400716500149cccbc19aa06bff66.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.danglingIndices.deleteDanglingIndex({ + index_uuid: "", + accept_data_loss: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/320645d771e952af2a67bb7445c3688d.asciidoc b/docs/doc_examples/320645d771e952af2a67bb7445c3688d.asciidoc new file mode 100644 index 000000000..3dbe450ee --- /dev/null +++ b/docs/doc_examples/320645d771e952af2a67bb7445c3688d.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "sorani_example", + settings: { + analysis: { + filter: { + sorani_stop: { + type: "stop", + stopwords: "_sorani_", + }, + sorani_keywords: { + type: "keyword_marker", + keywords: ["mînak"], + }, + sorani_stemmer: { + type: "stemmer", + language: "sorani", + }, + }, + analyzer: { + rebuilt_sorani: { + tokenizer: "standard", + filter: [ + "sorani_normalization", + "lowercase", + "decimal_digit", + "sorani_stop", + "sorani_keywords", + "sorani_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32123981430e5a8b34fe14314fc48429.asciidoc b/docs/doc_examples/32123981430e5a8b34fe14314fc48429.asciidoc new file mode 100644 index 000000000..0e3687fe2 --- /dev/null +++ b/docs/doc_examples/32123981430e5a8b34fe14314fc48429.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001,my-index-000002", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc b/docs/doc_examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc new file mode 100644 index 000000000..2725863d8 --- /dev/null +++ b/docs/doc_examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + fielddata: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3250a8d2d2a9619035040e55a03620b9.asciidoc b/docs/doc_examples/3250a8d2d2a9619035040e55a03620b9.asciidoc new file mode 100644 index 000000000..a90140c98 --- /dev/null +++ b/docs/doc_examples/3250a8d2d2a9619035040e55a03620b9.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.http.HttpTracer": "TRACE", + "logger.org.elasticsearch.http.HttpBodyTracer": "TRACE", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/327466380bcd55361973b4a96c6dccb2.asciidoc b/docs/doc_examples/327466380bcd55361973b4a96c6dccb2.asciidoc new file mode 100644 index 000000000..b8aa8a2e9 --- /dev/null +++ b/docs/doc_examples/327466380bcd55361973b4a96c6dccb2.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "spanish_example", + settings: { + analysis: { + filter: 
{ + spanish_stop: { + type: "stop", + stopwords: "_spanish_", + }, + spanish_keywords: { + type: "keyword_marker", + keywords: ["ejemplo"], + }, + spanish_stemmer: { + type: "stemmer", + language: "light_spanish", + }, + }, + analyzer: { + rebuilt_spanish: { + tokenizer: "standard", + filter: [ + "lowercase", + "spanish_stop", + "spanish_keywords", + "spanish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32a7acdfb7046966b28f394476c99126.asciidoc b/docs/doc_examples/32a7acdfb7046966b28f394476c99126.asciidoc new file mode 100644 index 000000000..faffd3f29 --- /dev/null +++ b/docs/doc_examples/32a7acdfb7046966b28f394476c99126.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "POINT (-377.03653 389.897676)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32af23a4b0fea6c81c4688ce5fe4ac35.asciidoc b/docs/doc_examples/32af23a4b0fea6c81c4688ce5fe4ac35.asciidoc new file mode 100644 index 000000000..d304a1bdc --- /dev/null +++ b/docs/doc_examples/32af23a4b0fea6c81c4688ce5fe4ac35.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_ranks: { + percentile_ranks: { + field: "load_time", + values: [500, 600], + hdr: { + number_of_significant_value_digits: 3, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc b/docs/doc_examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc new file mode 100644 index 000000000..b0fd0f6e4 --- /dev/null +++ b/docs/doc_examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.updateUserProfileData({ + uid: "u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", + labels: { + direction: "west", + }, + data: { + app1: { + font: "large", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32b8a5152b47930f2e16c40c8615c7bb.asciidoc b/docs/doc_examples/32b8a5152b47930f2e16c40c8615c7bb.asciidoc new file mode 100644 index 000000000..86a4d3e28 --- /dev/null +++ b/docs/doc_examples/32b8a5152b47930f2e16c40c8615c7bb.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my-example-app", + search_application: { + indices: ["example-index"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {\n "multi_match" : {\n "query": "{{query}}",\n "fields": [ "title^4", "plot", "actors", "directors" ]\n }\n },\n {\n "multi_match" : {\n "query": "{{query}}",\n "type": "phrase_prefix",\n "fields": [ "title^4", "plot"]\n }\n },\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', + params: { + query: "", + _es_filters: {}, + _es_aggs: 
{}, + _es_sort_fields: {}, + size: 10, + from: 0, + }, + dictionary: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc b/docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc new file mode 100644 index 000000000..dcc8ff429 --- /dev/null +++ b/docs/doc_examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + searchable_snapshot: { + snapshot_repository: "backing_repo", + replicate_for: "14d", + }, + }, + }, + delete: { + min_age: "28d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32cd57666bc80b8cf793d06fa1086669.asciidoc b/docs/doc_examples/32cd57666bc80b8cf793d06fa1086669.asciidoc new file mode 100644 index 000000000..91db8be03 --- /dev/null +++ b/docs/doc_examples/32cd57666bc80b8cf793d06fa1086669.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "tsv", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/32ce26b8af95f7ccc2a7bd5e77a39d6c.asciidoc b/docs/doc_examples/32ce26b8af95f7ccc2a7bd5e77a39d6c.asciidoc new file mode 100644 index 000000000..948af1175 --- /dev/null +++ b/docs/doc_examples/32ce26b8af95f7ccc2a7bd5e77a39d6c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.recovery({ + index: "my-index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/32de5dd306bd014d67053d2f175defcd.asciidoc b/docs/doc_examples/32de5dd306bd014d67053d2f175defcd.asciidoc new file mode 100644 index 000000000..0e47d6710 --- /dev/null +++ b/docs/doc_examples/32de5dd306bd014d67053d2f175defcd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.xpack.security.authc.saml": "debug", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3312c82f81816bf76629db9582991812.asciidoc b/docs/doc_examples/3312c82f81816bf76629db9582991812.asciidoc new file mode 100644 index 000000000..f1ba5e168 --- /dev/null +++ b/docs/doc_examples/3312c82f81816bf76629db9582991812.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + "index.search.slowlog.threshold.query.warn": "10s", + "index.search.slowlog.threshold.query.info": "5s", + "index.search.slowlog.threshold.query.debug": "2s", + "index.search.slowlog.threshold.query.trace": "500ms", + "index.search.slowlog.threshold.fetch.warn": "1s", + "index.search.slowlog.threshold.fetch.info": "800ms", + "index.search.slowlog.threshold.fetch.debug": 
"500ms", + "index.search.slowlog.threshold.fetch.trace": "200ms", + "index.search.slowlog.include.user": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/331caebf810a923644eb6de26e5a97f4.asciidoc b/docs/doc_examples/331caebf810a923644eb6de26e5a97f4.asciidoc new file mode 100644 index 000000000..b98c10d2e --- /dev/null +++ b/docs/doc_examples/331caebf810a923644eb6de26e5a97f4.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_join_field: { + type: "join", + relations: { + question: ["answer", "comment"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3337c817ebd438254505a31e91c91724.asciidoc b/docs/doc_examples/3337c817ebd438254505a31e91c91724.asciidoc new file mode 100644 index 000000000..cdd215727 --- /dev/null +++ b/docs/doc_examples/3337c817ebd438254505a31e91c91724.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3341d3bbb53052447a37c92a04c14b70.asciidoc b/docs/doc_examples/3341d3bbb53052447a37c92a04c14b70.asciidoc new file mode 100644 index 000000000..dac17132e --- /dev/null +++ b/docs/doc_examples/3341d3bbb53052447a37c92a04c14b70.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: "ctx._source.new_field = 'value_of_new_field'", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3342c69b2c2303247217532956fcce85.asciidoc b/docs/doc_examples/3342c69b2c2303247217532956fcce85.asciidoc deleted file mode 100644 index 4b9941a85..000000000 --- a/docs/doc_examples/3342c69b2c2303247217532956fcce85.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - exists: { - field: 'user' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/3343a4cf559060c422d86c786a95e535.asciidoc b/docs/doc_examples/3343a4cf559060c422d86c786a95e535.asciidoc new file mode 100644 index 000000000..7f260292c --- /dev/null +++ b/docs/doc_examples/3343a4cf559060c422d86c786a95e535.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["apostrophe"], + text: "Istanbul'a veya Istanbul'dan", +}); +console.log(response); +---- diff --git a/docs/doc_examples/334811cfceb6858aeec5b3461717dd63.asciidoc b/docs/doc_examples/334811cfceb6858aeec5b3461717dd63.asciidoc new file mode 100644 index 000000000..1e3f14f12 --- /dev/null +++ b/docs/doc_examples/334811cfceb6858aeec5b3461717dd63.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.ingest.putPipeline({ + id: "geoip", + description: "Add ip geolocation info", + processors: [ + { + geoip: { + field: "ip", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "geoip", + document: { + ip: "80.231.5.0", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc b/docs/doc_examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc new file mode 100644 index 000000000..9c31f8a68 --- /dev/null +++ b/docs/doc_examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.get({ + task_id: "oTUltX4IQMOUUVeiohTt8A:124", +}); +console.log(response); +---- diff --git a/docs/doc_examples/336613f48dd95ea993dd3bcce264fd0e.asciidoc b/docs/doc_examples/336613f48dd95ea993dd3bcce264fd0e.asciidoc new file mode 100644 index 000000000..fe4d11b4c --- /dev/null +++ b/docs/doc_examples/336613f48dd95ea993dd3bcce264fd0e.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + cold: { + actions: { + allocate: { + require: { + box_type: "cold", + storage: "high", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/33732208fc6e6fe1e8d278299681932e.asciidoc b/docs/doc_examples/33732208fc6e6fe1e8d278299681932e.asciidoc new file mode 100644 index 000000000..f864a2ef1 --- /dev/null +++ b/docs/doc_examples/33732208fc6e6fe1e8d278299681932e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "LINESTRING (-377.03653 389.897676, -377.009051 389.889939)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc b/docs/doc_examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc new file mode 100644 index 000000000..03dd504ab --- /dev/null +++ b/docs/doc_examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.deleteSynonym({ + id: "my-synonyms-set", +}); +console.log(response); +---- diff --git a/docs/doc_examples/339c4e5af9f9069ad9912aa574488b59.asciidoc b/docs/doc_examples/339c4e5af9f9069ad9912aa574488b59.asciidoc new file mode 100644 index 000000000..905e21821 --- /dev/null +++ b/docs/doc_examples/339c4e5af9f9069ad9912aa574488b59.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-bit-vectors", + mappings: { + properties: { + my_dense_vector: { + type: "dense_vector", + index: false, + element_type: "bit", + dims: 40, + }, + }, + }, +}); +console.log(response); + +const response1 = 
await client.index({ + index: "my-index-bit-vectors", + id: 1, + document: { + my_dense_vector: [8, 5, -15, 1, -7], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-bit-vectors", + id: 2, + document: { + my_dense_vector: [-1, 115, -3, 4, -128], + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index-bit-vectors", + id: 3, + document: { + my_dense_vector: [2, 18, -5, 0, -124], + }, +}); +console.log(response3); + +const response4 = await client.indices.refresh({ + index: "my-index-bit-vectors", +}); +console.log(response4); +---- diff --git a/docs/doc_examples/33b732bb301e99d2161bd2246494f487.asciidoc b/docs/doc_examples/33b732bb301e99d2161bd2246494f487.asciidoc new file mode 100644 index 000000000..6a2a72cc9 --- /dev/null +++ b/docs/doc_examples/33b732bb301e99d2161bd2246494f487.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "postal_lookup", + processors: [ + { + enrich: { + description: "Add 'geo_data' based on 'geo_location'", + policy_name: "postal_policy", + field: "geo_location", + target_field: "geo_data", + shape_relation: "INTERSECTS", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/33d480fc6812ada75756cf5337bc9092.asciidoc b/docs/doc_examples/33d480fc6812ada75756cf5337bc9092.asciidoc new file mode 100644 index 000000000..88753c964 --- /dev/null +++ b/docs/doc_examples/33d480fc6812ada75756cf5337bc9092.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", + querystring: { + from: "0", + size: "2", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/33f148e3d8676de6cc52f58749898a13.asciidoc b/docs/doc_examples/33f148e3d8676de6cc52f58749898a13.asciidoc index 35ca65dd2..75a1ad5bb 100644 --- a/docs/doc_examples/33f148e3d8676de6cc52f58749898a13.asciidoc +++ b/docs/doc_examples/33f148e3d8676de6cc52f58749898a13.asciidoc @@ -4,25 +4,22 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - dis_max: { - queries: [ - { - match_phrase_prefix: { - subject: 'quick brown f' - } + query: { + dis_max: { + queries: [ + { + match_phrase_prefix: { + subject: "quick brown f", }, - { - match_phrase_prefix: { - message: 'quick brown f' - } - } - ] - } - } - } -}) -console.log(response) + }, + { + match_phrase_prefix: { + message: "quick brown f", + }, + }, + ], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc b/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc new file mode 100644 index 000000000..0b2b04c09 --- /dev/null +++ b/docs/doc_examples/342ddf9121aeddd82fea2464665e25da.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.put({ + connector_id: "my-connector", + index_name: "search-google-drive", + name: "My Connector", + service_type: "google_drive", +}); +console.log(response); +---- diff --git a/docs/doc_examples/343dd09a8c76987e586858be3bdc51eb.asciidoc 
b/docs/doc_examples/343dd09a8c76987e586858be3bdc51eb.asciidoc new file mode 100644 index 000000000..816461ead --- /dev/null +++ b/docs/doc_examples/343dd09a8c76987e586858be3bdc51eb.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_queries2", + settings: { + analysis: { + analyzer: { + wildcard_suffix: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "reverse", "wildcard_edge_ngram"], + }, + wildcard_suffix_search_time: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "reverse"], + }, + }, + filter: { + wildcard_edge_ngram: { + type: "edge_ngram", + min_gram: 1, + max_gram: 32, + }, + }, + }, + }, + mappings: { + properties: { + query: { + type: "percolator", + }, + my_field: { + type: "text", + fields: { + suffix: { + type: "text", + analyzer: "wildcard_suffix", + search_analyzer: "wildcard_suffix_search_time", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/344b4144244d57f87c6aa4652b100b25.asciidoc b/docs/doc_examples/344b4144244d57f87c6aa4652b100b25.asciidoc new file mode 100644 index 000000000..04b448afc --- /dev/null +++ b/docs/doc_examples/344b4144244d57f87c6aa4652b100b25.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + document: { + color: "blue", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/346f28d82acb5427c304aa574fea0008.asciidoc b/docs/doc_examples/346f28d82acb5427c304aa574fea0008.asciidoc new file mode 100644 index 000000000..84f239166 --- /dev/null +++ b/docs/doc_examples/346f28d82acb5427c304aa574fea0008.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "thai_example", + settings: { + analysis: { + filter: { + thai_stop: { + type: "stop", + stopwords: "_thai_", + }, + }, + analyzer: { + rebuilt_thai: { + tokenizer: "thai", + filter: ["lowercase", "decimal_digit", "thai_stop"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc b/docs/doc_examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc new file mode 100644 index 000000000..acb89c3e8 --- /dev/null +++ b/docs/doc_examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.activateWatch({ + watch_id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3487e60e1ae9d4925ce540cd63574385.asciidoc b/docs/doc_examples/3487e60e1ae9d4925ce540cd63574385.asciidoc new file mode 100644 index 000000000..0e0e4838b --- /dev/null +++ b/docs/doc_examples/3487e60e1ae9d4925ce540cd63574385.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + boosting: { + positive: { + term: { + text: "apple", + }, + }, + negative: { + term: { + text: 
"pie tart fruit crumble tree", + }, + }, + negative_boost: 0.5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/34be27141e3a476c138546190101c8bc.asciidoc b/docs/doc_examples/34be27141e3a476c138546190101c8bc.asciidoc new file mode 100644 index 000000000..62b8de4d2 --- /dev/null +++ b/docs/doc_examples/34be27141e3a476c138546190101c8bc.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchMvt({ + index: "my-index", + field: "my-geo-field", + zoom: 15, + x: 5271, + y: 12710, +}); +console.log(response); +---- diff --git a/docs/doc_examples/34d51c54b62e9a160c0ddacc10134bb0.asciidoc b/docs/doc_examples/34d51c54b62e9a160c0ddacc10134bb0.asciidoc new file mode 100644 index 000000000..a0aef5244 --- /dev/null +++ b/docs/doc_examples/34d51c54b62e9a160c0ddacc10134bb0.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_first: { + match: { + span_term: { + "user.id": "kimchy", + }, + }, + end: 3, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/34d63740b58209a3d031212909743925.asciidoc b/docs/doc_examples/34d63740b58209a3d031212909743925.asciidoc new file mode 100644 index 000000000..134309249 --- /dev/null +++ b/docs/doc_examples/34d63740b58209a3d031212909743925.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "openai-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "openai_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/34efeade38445b2834749ced59782e25.asciidoc b/docs/doc_examples/34efeade38445b2834749ced59782e25.asciidoc deleted file mode 100644 index be97bbd98..000000000 --- a/docs/doc_examples/34efeade38445b2834749ced59782e25.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - order: { - 'playback_stats.max': 'desc' - } - }, - aggs: { - playback_stats: { - stats: { - field: 'play_count' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/35260b615d0b5628c95d7cc814c39bd3.asciidoc b/docs/doc_examples/35260b615d0b5628c95d7cc814c39bd3.asciidoc new file mode 100644 index 000000000..da118f577 --- /dev/null +++ b/docs/doc_examples/35260b615d0b5628c95d7cc814c39bd3.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + has_child: { + type: "child", + query: { + function_score: { + script_score: { + script: "_score * doc['click_count'].value", + }, + }, + }, + score_mode: "max", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/353020cb30a885ee7f5ce2b141ba574a.asciidoc 
b/docs/doc_examples/353020cb30a885ee7f5ce2b141ba574a.asciidoc new file mode 100644 index 000000000..73a931778 --- /dev/null +++ b/docs/doc_examples/353020cb30a885ee7f5ce2b141ba574a.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + prefix: { + user: "ki", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc b/docs/doc_examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc new file mode 100644 index 000000000..d5f33f225 --- /dev/null +++ b/docs/doc_examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.healthReport({ + verbose: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3544f17cb97b613a2f733707c676f759.asciidoc b/docs/doc_examples/3544f17cb97b613a2f733707c676f759.asciidoc new file mode 100644 index 000000000..0d38c0dcc --- /dev/null +++ b/docs/doc_examples/3544f17cb97b613a2f733707c676f759.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + aggs: { + f: { + filters: { + filters: { + hats: { + term: { + type: "hat", + }, + }, + t_shirts: { + term: { + type: "t-shirt", + }, + }, + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3545261682af72f4bee57f2bac0a9590.asciidoc b/docs/doc_examples/3545261682af72f4bee57f2bac0a9590.asciidoc new file mode 100644 index 000000000..0cfae998e --- /dev/null +++ b/docs/doc_examples/3545261682af72f4bee57f2bac0a9590.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores({ + status: "green", +}); +console.log(response); +---- diff --git a/docs/doc_examples/35563ef92dddef9d83906d9c43c60d0f.asciidoc b/docs/doc_examples/35563ef92dddef9d83906d9c43c60d0f.asciidoc new file mode 100644 index 000000000..9bb6c2042 --- /dev/null +++ b/docs/doc_examples/35563ef92dddef9d83906d9c43c60d0f.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mtermvectors({ + docs: [ + { + _index: "my-index-000001", + _id: "2", + term_statistics: true, + }, + { + _index: "my-index-000001", + _id: "1", + fields: ["message"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc b/docs/doc_examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc new file mode 100644 index 000000000..8f37b6bd1 --- /dev/null +++ b/docs/doc_examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: ["my-index-000001", "my-index-000002"], + }, + dest: { + index: 
"my-new-index-000002", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/35a272df8c919a12d7c3106a18245748.asciidoc b/docs/doc_examples/35a272df8c919a12d7c3106a18245748.asciidoc new file mode 100644 index 000000000..40e01fb68 --- /dev/null +++ b/docs/doc_examples/35a272df8c919a12d7c3106a18245748.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "lang_ident_model_1", + docs: [ + { + text: "The fool doth think he is wise, but the wise man knows himself to be a fool.", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/35be136ba9df7474a5521631e2a385b1.asciidoc b/docs/doc_examples/35be136ba9df7474a5521631e2a385b1.asciidoc new file mode 100644 index 000000000..ddd6e37f7 --- /dev/null +++ b/docs/doc_examples/35be136ba9df7474a5521631e2a385b1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.explainDataLifecycle({ + index: ".ds-metrics-2023.03.22-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/35c33ef48cf8a4ee368874141622f9d5.asciidoc b/docs/doc_examples/35c33ef48cf8a4ee368874141622f9d5.asciidoc new file mode 100644 index 000000000..efd76c619 --- /dev/null +++ b/docs/doc_examples/35c33ef48cf8a4ee368874141622f9d5.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + strings_as_text: { + match_mapping_type: "string", + mapping: { + type: "text", + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/35c664285f2e8b7d5d50ca37ae3ba794.asciidoc b/docs/doc_examples/35c664285f2e8b7d5d50ca37ae3ba794.asciidoc new file mode 100644 index 000000000..dc4c0d608 --- /dev/null +++ b/docs/doc_examples/35c664285f2e8b7d5d50ca37ae3ba794.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "GET /search", + }, + }, + collapse: { + field: "user.id", + }, + sort: ["user.id"], + search_after: ["dd5ce1ad"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/35e8da9410b8432cf4095f2541ad7b1d.asciidoc b/docs/doc_examples/35e8da9410b8432cf4095f2541ad7b1d.asciidoc deleted file mode 100644 index e84e5e35b..000000000 --- a/docs/doc_examples/35e8da9410b8432cf4095f2541ad7b1d.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - products: { - terms: { - field: 'product', - size: 5, - show_term_doc_count_error: true - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/35eef1765e9a5991d77592a0c7490fe0.asciidoc b/docs/doc_examples/35eef1765e9a5991d77592a0c7490fe0.asciidoc new file mode 100644 index 000000000..2fe9e17b4 --- /dev/null +++ b/docs/doc_examples/35eef1765e9a5991d77592a0c7490fe0.asciidoc @@ -0,0 +1,18 
@@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + grade_min: { + min: { + field: "grade", + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/35f892b475a1770f18328158be7039fd.asciidoc b/docs/doc_examples/35f892b475a1770f18328158be7039fd.asciidoc new file mode 100644 index 000000000..49bcef2d6 --- /dev/null +++ b/docs/doc_examples/35f892b475a1770f18328158be7039fd.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-2", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 3, + similarity: "dot_product", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/35fc63cbefce7bc131ad467b5ba209ef.asciidoc b/docs/doc_examples/35fc63cbefce7bc131ad467b5ba209ef.asciidoc new file mode 100644 index 000000000..037812101 --- /dev/null +++ b/docs/doc_examples/35fc63cbefce7bc131ad467b5ba209ef.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", + s: "disk.avail", + h: "node,disk.percent,disk.avail,disk.total,disk.used,disk.indices,shards", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc b/docs/doc_examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc new file mode 100644 index 000000000..1e2416525 --- /dev/null +++ b/docs/doc_examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.putSynonym({ + id: "my-synonyms-set", + synonyms_set: [ + { + synonyms: "hello => hi => howdy", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/360b3cef34bbddc5d9579ca95f0cb061.asciidoc b/docs/doc_examples/360b3cef34bbddc5d9579ca95f0cb061.asciidoc new file mode 100644 index 000000000..271b3580b --- /dev/null +++ b/docs/doc_examples/360b3cef34bbddc5d9579ca95f0cb061.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-data-stream", + write_index_only: "true", + properties: { + message: { + type: "text", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/360c4f373e72ba861584ee85bd218124.asciidoc b/docs/doc_examples/360c4f373e72ba861584ee85bd218124.asciidoc new file mode 100644 index 000000000..0c46ff5ce --- /dev/null +++ b/docs/doc_examples/360c4f373e72ba861584ee85bd218124.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test_index", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["lowercase", "porter_stem"], + }, + }, + }, + }, + mappings: { + properties: { + query: { + type: "percolator", + }, + body: { + type: 
"text", + analyzer: "my_analyzer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3613f402ee63f0efb6b8d9c6a919b410.asciidoc b/docs/doc_examples/3613f402ee63f0efb6b8d9c6a919b410.asciidoc new file mode 100644 index 000000000..6b6a75b25 --- /dev/null +++ b/docs/doc_examples/3613f402ee63f0efb6b8d9c6a919b410.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + format: "txt", + query: + "\n FROM library\n | KEEP author, name, page_count, release_date\n | SORT page_count DESC\n | LIMIT 5\n ", + filter: { + range: { + page_count: { + gte: 100, + lte: 200, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/362dfccdb6f7933b22c909542e0b4e0a.asciidoc b/docs/doc_examples/362dfccdb6f7933b22c909542e0b4e0a.asciidoc new file mode 100644 index 000000000..0b499febe --- /dev/null +++ b/docs/doc_examples/362dfccdb6f7933b22c909542e0b4e0a.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-data-stream", + query: { + match: { + "user.id": "l7gk7f82", + }, + }, + script: { + source: "ctx._source.user.id = params.new_id", + params: { + new_id: "XgdX0NoX", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3649194a97d265a3bc758f8b38f7561e.asciidoc b/docs/doc_examples/3649194a97d265a3bc758f8b38f7561e.asciidoc new file mode 100644 index 000000000..e465377fb --- /dev/null +++ b/docs/doc_examples/3649194a97d265a3bc758f8b38f7561e.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "semantic-embeddings", + mappings: { + properties: { + semantic_text: { + type: "semantic_text", + }, + content: { + type: "text", + copy_to: "semantic_text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/365256ebdfa47b449780771d9beba8d9.asciidoc b/docs/doc_examples/365256ebdfa47b449780771d9beba8d9.asciidoc new file mode 100644 index 000000000..d2e864a8d --- /dev/null +++ b/docs/doc_examples/365256ebdfa47b449780771d9beba8d9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/_sync_job/my-connector-sync-job/_check_in", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3653567181f43a5f64c74f934aa821c2.asciidoc b/docs/doc_examples/3653567181f43a5f64c74f934aa821c2.asciidoc deleted file mode 100644 index cba7c881c..000000000 --- a/docs/doc_examples/3653567181f43a5f64c74f934aa821c2.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - remove: { - index: 'test1', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc b/docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc new file mode 100644 index 
000000000..2256f4c94 --- /dev/null +++ b/docs/doc_examples/36792c81c053e0555407d1e83e7e054f.asciidoc @@ -0,0 +1,70 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "movies", + size: 10, + retriever: { + rescorer: { + rescore: { + window_size: 50, + query: { + rescore_query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: + "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", + params: { + queryVector: [-0.5, 90, -10, 14.8, -156], + }, + }, + }, + }, + }, + }, + retriever: { + rrf: { + rank_window_size: 100, + retrievers: [ + { + standard: { + query: { + sparse_vector: { + field: "plot_embedding", + inference_id: "my-elser-model", + query: "films that explore psychological depths", + }, + }, + }, + }, + { + standard: { + query: { + multi_match: { + query: "crime", + fields: ["plot", "title"], + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [10, 22, 77], + k: 10, + num_candidates: 10, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/36818c6d9f434d387819c30bd9addb14.asciidoc b/docs/doc_examples/36818c6d9f434d387819c30bd9addb14.asciidoc deleted file mode 100644 index 5ee620974..000000000 --- a/docs/doc_examples/36818c6d9f434d387819c30bd9addb14.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/36962727b806315b221e8a63e05caddc.asciidoc b/docs/doc_examples/36962727b806315b221e8a63e05caddc.asciidoc new file mode 100644 index 000000000..f50a6c52c --- /dev/null +++ b/docs/doc_examples/36962727b806315b221e8a63e05caddc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + "employee-id": { + type: "keyword", + index: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc b/docs/doc_examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc new file mode 100644 index 000000000..eead102c5 --- /dev/null +++ b/docs/doc_examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "openai-embeddings", + pipeline: "openai_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/36b26905c5f96d0b785c3267fb63838d.asciidoc b/docs/doc_examples/36b26905c5f96d0b785c3267fb63838d.asciidoc new file mode 100644 index 000000000..8cea96414 --- /dev/null +++ b/docs/doc_examples/36b26905c5f96d0b785c3267fb63838d.asciidoc @@ -0,0 +1,609 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + ip: { + type: "ip", + }, + version: { + type: "version", + }, + missing_keyword: { + type: "keyword", + }, + "@timestamp": { + type: "date", + }, + type_test: { + type: "keyword", + }, + "@timestamp_pretty": { + type: "date", + format: "dd-MM-yyyy", + }, + event_type: { + type: "keyword", + }, + event: { + properties: { + category: { + type: "alias", + path: "event_type", + }, + }, + }, + host: { + type: "keyword", + }, + os: { + type: "keyword", + }, + bool: { + type: "boolean", + }, + uptime: { + type: "long", + }, + port: { + type: "long", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "my-index-000002", + mappings: { + properties: { + ip: { + type: "ip", + }, + "@timestamp": { + type: "date", + }, + "@timestamp_pretty": { + type: "date", + format: "yyyy-MM-dd", + }, + type_test: { + type: "keyword", + }, + event_type: { + type: "keyword", + }, + event: { + properties: { + category: { + type: "alias", + path: "event_type", + }, + }, + }, + host: { + type: "keyword", + }, + op_sys: { + type: "keyword", + }, + bool: { + type: "boolean", + }, + uptime: { + type: "long", + }, + port: { + type: "long", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.create({ + index: "my-index-000003", + mappings: { + properties: { + host_ip: { + type: "ip", + }, + "@timestamp": { + type: "date", + }, + date: { + type: "date", + }, + event_type: { + type: "keyword", + }, + event: { + properties: { + category: { + type: "alias", + path: "event_type", + }, + }, + }, + missing_keyword: { + type: "keyword", + }, + host: { + type: "keyword", + }, + os: { + type: "keyword", + }, + bool: { + type: "boolean", + }, + uptime: { + type: "long", + }, + port: { + type: "long", + }, + }, + }, +}); +console.log(response2); + +const response3 = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + "@timestamp": "1234567891", + "@timestamp_pretty": "12-12-2022", + missing_keyword: "test", + type_test: "abc", + ip: "10.0.0.1", + event_type: "alert", + host: "doom", + uptime: 0, + port: 1234, + os: "win10", + version: "1.0.0", + id: 11, + }, + { + index: { + _id: 2, + }, + }, + { + "@timestamp": "1234567892", + "@timestamp_pretty": "13-12-2022", + event_type: "alert", + type_test: "abc", + host: "CS", + uptime: 5, + port: 1, + os: "win10", + version: "1.2.0", + id: 12, + }, + { + index: { + _id: 3, + }, + }, + { + "@timestamp": "1234567893", + "@timestamp_pretty": "12-12-2022", + event_type: "alert", + type_test: "abc", + host: "farcry", + uptime: 1, + port: 1234, + bool: false, + os: "win10", + version: "2.0.0", + id: 13, + }, + { + index: { + _id: 4, + }, + }, + { + "@timestamp": "1234567894", + "@timestamp_pretty": "13-12-2022", + event_type: "alert", + type_test: "abc", + host: "GTA", + uptime: 3, + port: 12, + os: "slack", + version: "10.0.0", + id: 14, + }, + { + index: { + _id: 5, + }, + }, + { + "@timestamp": "1234567895", + "@timestamp_pretty": "17-12-2022", + event_type: "alert", + host: "sniper 3d", + uptime: 6, + port: 1234, + os: "fedora", + version: "20.1.0", + id: 15, + }, + { + index: { + _id: 6, + }, + }, + { + "@timestamp": "1234568896", + "@timestamp_pretty": "17-12-2022", + event_type: "alert", + host: "doom", + port: 65123, + bool: true, + os: "redhat", + version: "20.10.0", + id: 16, + }, + { + index: { + _id: 7, + }, + }, + { + "@timestamp": "1234567897", + 
"@timestamp_pretty": "17-12-2022", + missing_keyword: "yyy", + event_type: "failure", + host: "doom", + uptime: 15, + port: 1234, + bool: true, + os: "redhat", + version: "20.2.0", + id: 17, + }, + { + index: { + _id: 8, + }, + }, + { + "@timestamp": "1234567898", + "@timestamp_pretty": "12-12-2022", + missing_keyword: "test", + event_type: "success", + host: "doom", + uptime: 16, + port: 512, + os: "win10", + version: "1.2.3", + id: 18, + }, + { + index: { + _id: 9, + }, + }, + { + "@timestamp": "1234567899", + "@timestamp_pretty": "15-12-2022", + missing_keyword: "test", + event_type: "success", + host: "GTA", + port: 12, + bool: true, + os: "win10", + version: "1.2.3", + id: 19, + }, + { + index: { + _id: 10, + }, + }, + { + "@timestamp": "1234567893", + missing_keyword: null, + ip: "10.0.0.5", + event_type: "alert", + host: "farcry", + uptime: 1, + port: 1234, + bool: true, + os: "win10", + version: "1.2.3", + id: 110, + }, + ], +}); +console.log(response3); + +const response4 = await client.bulk({ + index: "my-index-000002", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + "@timestamp": "1234567991", + type_test: "abc", + ip: "10.0.0.1", + event_type: "alert", + host: "doom", + uptime: 0, + port: 1234, + op_sys: "win10", + id: 21, + }, + { + index: { + _id: 2, + }, + }, + { + "@timestamp": "1234567992", + type_test: "abc", + event_type: "alert", + host: "CS", + uptime: 5, + port: 1, + op_sys: "win10", + id: 22, + }, + { + index: { + _id: 3, + }, + }, + { + "@timestamp": "1234567993", + type_test: "abc", + "@timestamp_pretty": "2022-12-17", + event_type: "alert", + host: "farcry", + uptime: 1, + port: 1234, + bool: false, + op_sys: "win10", + id: 23, + }, + { + index: { + _id: 4, + }, + }, + { + "@timestamp": "1234567994", + event_type: "alert", + host: "GTA", + uptime: 3, + port: 12, + op_sys: "slack", + id: 24, + }, + { + index: { + _id: 5, + }, + }, + { + "@timestamp": "1234567995", + event_type: "alert", + host: "sniper 3d", + uptime: 6, + port: 1234, + op_sys: "fedora", + id: 25, + }, + { + index: { + _id: 6, + }, + }, + { + "@timestamp": "1234568996", + "@timestamp_pretty": "2022-12-17", + ip: "10.0.0.5", + event_type: "alert", + host: "doom", + port: 65123, + bool: true, + op_sys: "redhat", + id: 26, + }, + { + index: { + _id: 7, + }, + }, + { + "@timestamp": "1234567997", + "@timestamp_pretty": "2022-12-17", + event_type: "failure", + host: "doom", + uptime: 15, + port: 1234, + bool: true, + op_sys: "redhat", + id: 27, + }, + { + index: { + _id: 8, + }, + }, + { + "@timestamp": "1234567998", + ip: "10.0.0.1", + event_type: "success", + host: "doom", + uptime: 16, + port: 512, + op_sys: "win10", + id: 28, + }, + { + index: { + _id: 9, + }, + }, + { + "@timestamp": "1234567999", + ip: "10.0.0.1", + event_type: "success", + host: "GTA", + port: 12, + bool: false, + op_sys: "win10", + id: 29, + }, + ], +}); +console.log(response4); + +const response5 = await client.bulk({ + index: "my-index-000003", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + "@timestamp": "1334567891", + host_ip: "10.0.0.1", + event_type: "alert", + host: "doom", + uptime: 0, + port: 12, + os: "win10", + id: 31, + }, + { + index: { + _id: 2, + }, + }, + { + "@timestamp": "1334567892", + event_type: "alert", + host: "CS", + os: "win10", + id: 32, + }, + { + index: { + _id: 3, + }, + }, + { + "@timestamp": "1334567893", + event_type: "alert", + host: "farcry", + bool: true, + os: "win10", + id: 33, + }, + { + index: { + _id: 4, + }, + }, + { + "@timestamp": 
"1334567894", + event_type: "alert", + host: "GTA", + os: "slack", + bool: true, + id: 34, + }, + { + index: { + _id: 5, + }, + }, + { + "@timestamp": "1234567895", + event_type: "alert", + host: "sniper 3d", + os: "fedora", + id: 35, + }, + { + index: { + _id: 6, + }, + }, + { + "@timestamp": "1234578896", + host_ip: "10.0.0.1", + event_type: "alert", + host: "doom", + bool: true, + os: "redhat", + id: 36, + }, + { + index: { + _id: 7, + }, + }, + { + "@timestamp": "1234567897", + event_type: "failure", + missing_keyword: "test", + host: "doom", + bool: true, + os: "redhat", + id: 37, + }, + { + index: { + _id: 8, + }, + }, + { + "@timestamp": "1234577898", + event_type: "success", + host: "doom", + os: "win10", + id: 38, + date: "1671235200000", + }, + { + index: { + _id: 9, + }, + }, + { + "@timestamp": "1234577899", + host_ip: "10.0.0.5", + event_type: "success", + host: "GTA", + bool: true, + os: "win10", + id: 39, + }, + ], +}); +console.log(response5); +---- diff --git a/docs/doc_examples/36b2778f23d0955255f52c075c4d213d.asciidoc b/docs/doc_examples/36b2778f23d0955255f52c075c4d213d.asciidoc deleted file mode 100644 index 41c977ec1..000000000 --- a/docs/doc_examples/36b2778f23d0955255f52c075c4d213d.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - remote: { - host: 'http://otherhost:9200/', - username: 'user', - password: 'pass' - }, - index: 'source', - query: { - match: { - test: 'data' - } - } - }, - dest: { - index: 'dest' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc b/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc new file mode 100644 index 000000000..c55cad6ae --- /dev/null +++ b/docs/doc_examples/36b86b97feedcf5632824eefc251d6ed.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "books", + query: { + match: { + name: "brave", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/36d229f734adcdab00be266a7ce038b1.asciidoc b/docs/doc_examples/36d229f734adcdab00be266a7ce038b1.asciidoc new file mode 100644 index 000000000..771d1a09d --- /dev/null +++ b/docs/doc_examples/36d229f734adcdab00be266a7ce038b1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-bit-vectors", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 40, + element_type: "bit", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/36da9668fef56910370f16bfb772cc40.asciidoc b/docs/doc_examples/36da9668fef56910370f16bfb772cc40.asciidoc new file mode 100644 index 000000000..910f8a458 --- /dev/null +++ b/docs/doc_examples/36da9668fef56910370f16bfb772cc40.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + metric: "request_cache", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/36e09bbd5896498ede0f5d37a18eae2c.asciidoc 
b/docs/doc_examples/36e09bbd5896498ede0f5d37a18eae2c.asciidoc new file mode 100644 index 000000000..c8fd4c771 --- /dev/null +++ b/docs/doc_examples/36e09bbd5896498ede0f5d37a18eae2c.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + routing: 1, + refresh: "true", + document: { + text: "This is a child document.", + "my-join-field": { + name: "my-child", + parent: "1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/36fae9dfc0b815546b45745bac054b67.asciidoc b/docs/doc_examples/36fae9dfc0b815546b45745bac054b67.asciidoc new file mode 100644 index 000000000..782d9bf5e --- /dev/null +++ b/docs/doc_examples/36fae9dfc0b815546b45745bac054b67.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + model_number: "HG537PU", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/370b297ed3433577adf53e64f572d89d.asciidoc b/docs/doc_examples/370b297ed3433577adf53e64f572d89d.asciidoc new file mode 100644 index 000000000..4e76cc905 --- /dev/null +++ b/docs/doc_examples/370b297ed3433577adf53e64f572d89d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_connector/_sync_job/my-connector-sync-job-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/371962cf63e65c10026177c6a1bad0b6.asciidoc b/docs/doc_examples/371962cf63e65c10026177c6a1bad0b6.asciidoc new file mode 100644 index 000000000..633073dea --- /dev/null +++ b/docs/doc_examples/371962cf63e65c10026177c6a1bad0b6.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.start(); +console.log(response); +---- diff --git a/docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc b/docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc new file mode 100644 index 000000000..e071509a9 --- /dev/null +++ b/docs/doc_examples/3722dad876023e0757138dd5a6d3240e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + settings: { + index: { + number_of_shards: 3, + "blocks.write": true, + }, + }, + mappings: { + properties: { + field1: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/37530f35f315b9f35e3e6a13cf2a1ccd.asciidoc b/docs/doc_examples/37530f35f315b9f35e3e6a13cf2a1ccd.asciidoc new file mode 100644 index 000000000..98875ead3 --- /dev/null +++ b/docs/doc_examples/37530f35f315b9f35e3e6a13cf2a1ccd.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + actors: { + terms: { + field: "actors", + size: 10, + collect_mode: "breadth_first", + }, + aggs: { + 
costars: { + terms: { + field: "actors", + size: 5, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc b/docs/doc_examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc new file mode 100644 index 000000000..7e3581418 --- /dev/null +++ b/docs/doc_examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + scroll: "1m", + slice: { + id: 0, + max: 2, + }, + query: { + match: { + message: "foo", + }, + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + scroll: "1m", + slice: { + id: 1, + max: 2, + }, + query: { + match: { + message: "foo", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/3759ca688c4bd3c838780a9aad63258b.asciidoc b/docs/doc_examples/3759ca688c4bd3c838780a9aad63258b.asciidoc new file mode 100644 index 000000000..1b4c9b4a6 --- /dev/null +++ b/docs/doc_examples/3759ca688c4bd3c838780a9aad63258b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getIndexTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/375bf2c51ce6cc386f9d4d635d5e84a7.asciidoc b/docs/doc_examples/375bf2c51ce6cc386f9d4d635d5e84a7.asciidoc new file mode 100644 index 000000000..4da50d7f2 --- /dev/null +++ b/docs/doc_examples/375bf2c51ce6cc386f9d4d635d5e84a7.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + geo_grid: { + location: { + geohex: "811fbffffffffff", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/376fbc965e1b093f6dbc198a94c83aa9.asciidoc b/docs/doc_examples/376fbc965e1b093f6dbc198a94c83aa9.asciidoc new file mode 100644 index 000000000..201aa0674 --- /dev/null +++ b/docs/doc_examples/376fbc965e1b093f6dbc198a94c83aa9.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index", + refresh: "true", + operations: [ + { + index: {}, + }, + { + gc: "[2021-04-27T16:16:34.699+0000][82460][gc,heap,exit] class space used 266K, capacity 384K, committed 384K, reserved 1048576K", + }, + { + index: {}, + }, + { + gc: "[2021-03-24T20:27:24.184+0000][90239][gc,heap,exit] class space used 15255K, capacity 16726K, committed 16844K, reserved 1048576K", + }, + { + index: {}, + }, + { + gc: "[2021-03-24T20:27:24.184+0000][90239][gc,heap,exit] Metaspace used 115409K, capacity 119541K, committed 120248K, reserved 1153024K", + }, + { + index: {}, + }, + { + gc: "[2021-04-19T15:03:21.735+0000][84408][gc,heap,exit] class space used 14503K, capacity 15894K, committed 15948K, reserved 1048576K", + }, + { + index: {}, + }, + { + gc: "[2021-04-19T15:03:21.735+0000][84408][gc,heap,exit] Metaspace used 107719K, capacity 111775K, committed 112724K, reserved 1146880K", + }, + { + index: {}, + }, + { + gc: "[2021-04-27T16:16:34.699+0000][82460][gc,heap,exit] class space 
used 266K, capacity 367K, committed 384K, reserved 1048576K", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/376ff4b2b5f657481af78a778aaab57f.asciidoc b/docs/doc_examples/376ff4b2b5f657481af78a778aaab57f.asciidoc new file mode 100644 index 000000000..8f9731735 --- /dev/null +++ b/docs/doc_examples/376ff4b2b5f657481af78a778aaab57f.asciidoc @@ -0,0 +1,74 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + nr: { + type: "integer", + }, + state: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-index", + refresh: "true", + operations: [ + { + index: {}, + }, + { + nr: 1, + state: "started", + }, + { + index: {}, + }, + { + nr: 2, + state: "stopped", + }, + { + index: {}, + }, + { + nr: 3, + state: "N/A", + }, + { + index: {}, + }, + { + nr: 4, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index", + filter_path: "aggregations", + aggs: { + my_top_metrics: { + top_metrics: { + metrics: { + field: "state", + missing: "N/A", + }, + sort: { + nr: "desc", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/377af0ea9b19c113f224d8150890b41b.asciidoc b/docs/doc_examples/377af0ea9b19c113f224d8150890b41b.asciidoc new file mode 100644 index 000000000..9b5a9864f --- /dev/null +++ b/docs/doc_examples/377af0ea9b19c113f224d8150890b41b.asciidoc @@ -0,0 +1,74 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + filter: [ + { + term: { + "event.outcome": "failure", + }, + }, + { + range: { + "@timestamp": { + gte: "2021-02-01", + lt: "2021-02-04", + }, + }, + }, + { + term: { + "service.name": { + value: "frontend-node", + }, + }, + }, + ], + }, + }, + aggs: { + failure_p_value: { + significant_terms: { + field: "user_agent.version", + background_filter: { + bool: { + must_not: [ + { + term: { + "event.outcome": "failure", + }, + }, + ], + filter: [ + { + range: { + "@timestamp": { + gte: "2021-02-01", + lt: "2021-02-04", + }, + }, + }, + { + term: { + "service.name": { + value: "frontend-node", + }, + }, + }, + ], + }, + }, + p_value: { + background_is_superset: false, + normalize_above: 1000, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/378e55f78fa13578a1302bae8d479765.asciidoc b/docs/doc_examples/378e55f78fa13578a1302bae8d479765.asciidoc new file mode 100644 index 000000000..dc69a09f6 --- /dev/null +++ b/docs/doc_examples/378e55f78fa13578a1302bae8d479765.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + color: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/37983daac3d9c8582583a507b3adb7f2.asciidoc b/docs/doc_examples/37983daac3d9c8582583a507b3adb7f2.asciidoc new file mode 100644 index 000000000..9eee60e5d --- /dev/null +++ b/docs/doc_examples/37983daac3d9c8582583a507b3adb7f2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.shutdown.putNode({ + node_id: "USpTGYaBSIKbgSUJR2Z9lg", + type: "restart", + reason: "Demonstrating how the node shutdown API works", +}); +console.log(response); +---- diff --git a/docs/doc_examples/37ae7c3e4d6d954487ec4185fe7d9ec8.asciidoc b/docs/doc_examples/37ae7c3e4d6d954487ec4185fe7d9ec8.asciidoc new file mode 100644 index 000000000..523c79f6d --- /dev/null +++ b/docs/doc_examples/37ae7c3e4d6d954487ec4185fe7d9ec8.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggregations: { + forces: { + terms: { + field: "force", + }, + aggregations: { + significant_crime_types: { + significant_terms: { + field: "crime_type", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/37b84f2ab7c2f6b4fe0e14cc7e018b1f.asciidoc b/docs/doc_examples/37b84f2ab7c2f6b4fe0e14cc7e018b1f.asciidoc new file mode 100644 index 000000000..7d06f09f6 --- /dev/null +++ b/docs/doc_examples/37b84f2ab7c2f6b4fe0e14cc7e018b1f.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + clusterB: { + mode: "proxy", + skip_unavailable: true, + server_name: "clusterb.es.region-b.gcp.elastic-cloud.com", + proxy_socket_connections: 18, + proxy_address: "clusterb.es.region-b.gcp.elastic-cloud.com:9400", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + clusterA: { + mode: "proxy", + skip_unavailable: true, + server_name: "clustera.es.region-a.gcp.elastic-cloud.com", + proxy_socket_connections: 18, + proxy_address: "clustera.es.region-a.gcp.elastic-cloud.com:9400", + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/37c73410bf13429279cbc61a413957d8.asciidoc b/docs/doc_examples/37c73410bf13429279cbc61a413957d8.asciidoc new file mode 100644 index 000000000..62a84cd43 --- /dev/null +++ b/docs/doc_examples/37c73410bf13429279cbc61a413957d8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.stats({ + filter_path: "indices.shards.total", +}); +console.log(response); +---- diff --git a/docs/doc_examples/37eaab0630976d3dee90a52011342883.asciidoc b/docs/doc_examples/37eaab0630976d3dee90a52011342883.asciidoc new file mode 100644 index 000000000..722466b4d --- /dev/null +++ b/docs/doc_examples/37eaab0630976d3dee90a52011342883.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "whitespace", + filter: ["stop"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/37f1f2e75ed95308ae436bbbb8d5645e.asciidoc b/docs/doc_examples/37f1f2e75ed95308ae436bbbb8d5645e.asciidoc new file mode 100644 index 000000000..40d543a28 --- /dev/null +++ 
b/docs/doc_examples/37f1f2e75ed95308ae436bbbb8d5645e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.postStartTrial({ + acknowledge: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc b/docs/doc_examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc new file mode 100644 index 000000000..456a47a30 --- /dev/null +++ b/docs/doc_examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.revertModelSnapshot({ + job_id: "low_request_rate", + snapshot_id: 1637092688, + delete_intervening_results: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/381fced1882ca8337143e6bb180a5715.asciidoc b/docs/doc_examples/381fced1882ca8337143e6bb180a5715.asciidoc deleted file mode 100644 index b5c93dd1b..000000000 --- a/docs/doc_examples/381fced1882ca8337143e6bb180a5715.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'test', - id: '1', - body: { - counter: 1, - tags: [ - 'red' - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc b/docs/doc_examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc new file mode 100644 index 000000000..4f60f6d2d --- /dev/null +++ b/docs/doc_examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "application-key-1", + metadata: { + application: "my-application", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/388d3eda4f792d3fce044777739217e6.asciidoc b/docs/doc_examples/388d3eda4f792d3fce044777739217e6.asciidoc new file mode 100644 index 000000000..5ec9ee4b3 --- /dev/null +++ b/docs/doc_examples/388d3eda4f792d3fce044777739217e6.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "animal_classification", + evaluation: { + classification: { + actual_field: "animal_class", + predicted_field: "ml.animal_class_prediction", + metrics: { + multiclass_confusion_matrix: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/388ec2b038d3ad69378f4c2e5bc36dce.asciidoc b/docs/doc_examples/388ec2b038d3ad69378f4c2e5bc36dce.asciidoc new file mode 100644 index 000000000..4052bc3ed --- /dev/null +++ b/docs/doc_examples/388ec2b038d3ad69378f4c2e5bc36dce.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_near: { + clauses: [ + { + span_term: { + text: "quick brown", + }, + }, + { + span_field_masking: { + query: { + span_term: { + "text.stems": "fox", + }, + }, + field: "text", + }, + }, + ], + slop: 5, + in_order: 
false, + }, + }, + highlight: { + require_field_match: false, + fields: { + "*": {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/38af4a55c1ea0f908dc7b06d680d2789.asciidoc b/docs/doc_examples/38af4a55c1ea0f908dc7b06d680d2789.asciidoc new file mode 100644 index 000000000..95db11a5b --- /dev/null +++ b/docs/doc_examples/38af4a55c1ea0f908dc7b06d680d2789.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createDataStream({ + name: "new-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/38b20fe981605e80a41517e9aa13134a.asciidoc b/docs/doc_examples/38b20fe981605e80a41517e9aa13134a.asciidoc new file mode 100644 index 000000000..0260866f4 --- /dev/null +++ b/docs/doc_examples/38b20fe981605e80a41517e9aa13134a.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + total_sales: { + sum: { + field: "price", + }, + }, + sales_bucket_filter: { + bucket_selector: { + buckets_path: { + totalSales: "total_sales", + }, + script: "params.totalSales > 200", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/38c1d0f6668e9563c0827f839f9fa505.asciidoc b/docs/doc_examples/38c1d0f6668e9563c0827f839f9fa505.asciidoc deleted file mode 100644 index ecd233312..000000000 --- a/docs/doc_examples/38c1d0f6668e9563c0827f839f9fa505.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - doc: { - name: 'new_name' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/38eed000de433b540116928681c520d3.asciidoc b/docs/doc_examples/38eed000de433b540116928681c520d3.asciidoc new file mode 100644 index 000000000..4807c0f30 --- /dev/null +++ b/docs/doc_examples/38eed000de433b540116928681c520d3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.previewDatafeed({ + datafeed_id: "datafeed-high_sum_total_sales", +}); +console.log(response); +---- diff --git a/docs/doc_examples/38f7739f750f1411bccf511a0abaaea3.asciidoc b/docs/doc_examples/38f7739f750f1411bccf511a0abaaea3.asciidoc new file mode 100644 index 000000000..671550520 --- /dev/null +++ b/docs/doc_examples/38f7739f750f1411bccf511a0abaaea3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + fields: "rating", +}); +console.log(response); +---- diff --git a/docs/doc_examples/38ffa96674b5fd4042589af0ebb0437b.asciidoc b/docs/doc_examples/38ffa96674b5fd4042589af0ebb0437b.asciidoc new file mode 100644 index 000000000..4a6e0dbd6 --- /dev/null +++ b/docs/doc_examples/38ffa96674b5fd4042589af0ebb0437b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "basic_users", + roles: ["user"], + rules: { + field: { + groups: "cn=users,dc=example,dc=com", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3924ee252581ebb96ac0e60046125ae8.asciidoc b/docs/doc_examples/3924ee252581ebb96ac0e60046125ae8.asciidoc new file mode 100644 index 000000000..a440d8210 --- /dev/null +++ b/docs/doc_examples/3924ee252581ebb96ac0e60046125ae8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUser({ + username: "jacknich", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3951d7fcd7f849fa278daf342872125a.asciidoc b/docs/doc_examples/3951d7fcd7f849fa278daf342872125a.asciidoc new file mode 100644 index 000000000..6357296cf --- /dev/null +++ b/docs/doc_examples/3951d7fcd7f849fa278daf342872125a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "analyze_sample", + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/39760996f94ad34aaceaa16a5cc97993.asciidoc b/docs/doc_examples/39760996f94ad34aaceaa16a5cc97993.asciidoc new file mode 100644 index 000000000..77a80d74b --- /dev/null +++ b/docs/doc_examples/39760996f94ad34aaceaa16a5cc97993.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.shutdown.getNode({ + node_id: "USpTGYaBSIKbgSUJR2Z9lg", +}); +console.log(response); +---- diff --git a/docs/doc_examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc b/docs/doc_examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc new file mode 100644 index 000000000..38335e2c1 --- /dev/null +++ b/docs/doc_examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.dataStreamsStats({ + name: "datastream", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/397bdb40d0146102f1f4c6a35675e16a.asciidoc b/docs/doc_examples/397bdb40d0146102f1f4c6a35675e16a.asciidoc new file mode 100644 index 000000000..cb5ab8825 --- /dev/null +++ b/docs/doc_examples/397bdb40d0146102f1f4c6a35675e16a.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + analysis: { + analyzer: { + english_exact: { + tokenizer: "standard", + filter: ["lowercase"], + }, + }, + }, + }, + mappings: { + properties: { + body: { + type: "text", + analyzer: "english", + fields: { + exact: { + type: "text", + analyzer: "english_exact", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "index", + id: 1, + document: { + body: "Ski resort", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "index", + id: 2, + document: 
{ + body: "A pair of skis", + }, +}); +console.log(response2); + +const response3 = await client.indices.refresh({ + index: "index", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc b/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc new file mode 100644 index 000000000..cfbe8ea75 --- /dev/null +++ b/docs/doc_examples/398389933901b572a06a752bc780af7c.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "completion", + inference_id: "anthropic_completion", + inference_config: { + service: "anthropic", + service_settings: { + api_key: "", + model_id: "", + }, + task_settings: { + max_tokens: 1024, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/39963032d423e2f20f53c4621b6ca3c6.asciidoc b/docs/doc_examples/39963032d423e2f20f53c4621b6ca3c6.asciidoc new file mode 100644 index 000000000..233d15c49 --- /dev/null +++ b/docs/doc_examples/39963032d423e2f20f53c4621b6ca3c6.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "ngram", + text: "Quick Fox", +}); +console.log(response); +---- diff --git a/docs/doc_examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc b/docs/doc_examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc new file mode 100644 index 000000000..3ab16d3e4 --- /dev/null +++ b/docs/doc_examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + include_named_queries_score: "true", + query: { + bool: { + should: [ + { + match: { + "name.first": { + query: "shay", + _name: "first", + }, + }, + }, + { + match: { + "name.last": { + query: "banon", + _name: "last", + }, + }, + }, + ], + filter: { + terms: { + "name.last": ["banon", "kimchy"], + _name: "test", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc b/docs/doc_examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc new file mode 100644 index 000000000..2152c1f8e --- /dev/null +++ b/docs/doc_examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getCalendarEvents({ + calendar_id: "planned-outages", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc b/docs/doc_examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc new file mode 100644 index 000000000..ab9097f4d --- /dev/null +++ b/docs/doc_examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "my-watch", + trigger: { + schedule: { + cron: "0 0/1 * * * ?", + }, + }, + input: { + search: { + request: { + indices: ["logstash*"], + body: { + query: { + bool: { + must: { + match: { + response: 404, + }, + }, + filter: { + range: { + 
"@timestamp": { + from: "{{ctx.trigger.scheduled_time}}||-5m", + to: "{{ctx.trigger.triggered_time}}", + }, + }, + }, + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + email_admin: { + email: { + to: "admin@domain.host.com", + subject: "404 recently encountered", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc b/docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc new file mode 100644 index 000000000..087b6dc1b --- /dev/null +++ b/docs/doc_examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ".ml-anomalies-custom-example", + size: 0, + aggs: { + job_ids: { + terms: { + field: "job_id", + size: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a2953fd81d65118a776c87a81530e15.asciidoc b/docs/doc_examples/3a2953fd81d65118a776c87a81530e15.asciidoc new file mode 100644 index 000000000..cf4bd65a7 --- /dev/null +++ b/docs/doc_examples/3a2953fd81d65118a776c87a81530e15.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + order: "score", + fields: { + comment: { + fragment_size: 150, + number_of_fragments: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc b/docs/doc_examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc new file mode 100644 index 000000000..597e9e13d --- /dev/null +++ b/docs/doc_examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + mode: "standard", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a3adae6dbb2c0316a7d98d0a6c1d4f8.asciidoc b/docs/doc_examples/3a3adae6dbb2c0316a7d98d0a6c1d4f8.asciidoc new file mode 100644 index 000000000..46049d94d --- /dev/null +++ b/docs/doc_examples/3a3adae6dbb2c0316a7d98d0a6c1d4f8.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "quantized-image-index", + knn: { + field: "image-vector", + query_vector: [0.1, -2], + k: 15, + num_candidates: 100, + }, + fields: ["title"], + rescore: { + window_size: 10, + query: { + rescore_query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: + "cosineSimilarity(params.query_vector, 'image-vector') + 1.0", + params: { + query_vector: [0.1, -2], + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a3e6e2627cafa08e4402a0de95785cc.asciidoc b/docs/doc_examples/3a3e6e2627cafa08e4402a0de95785cc.asciidoc new file mode 100644 index 000000000..493323793 --- /dev/null +++ b/docs/doc_examples/3a3e6e2627cafa08e4402a0de95785cc.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "you know for search", + }, + }, + collapse: { + field: "user.id", + }, + rescore: { + window_size: 50, + query: { + rescore_query: { + match_phrase: { + message: "you know for search", + }, + }, + query_weight: 0.3, + rescore_query_weight: 1.4, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a489743e49902df38e3368cae00717a.asciidoc b/docs/doc_examples/3a489743e49902df38e3368cae00717a.asciidoc new file mode 100644 index 000000000..5aec6e804 --- /dev/null +++ b/docs/doc_examples/3a489743e49902df38e3368cae00717a.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.hotThreads(); +console.log(response); +---- diff --git a/docs/doc_examples/3a4953663a5a3809b692c27446e16b7f.asciidoc b/docs/doc_examples/3a4953663a5a3809b692c27446e16b7f.asciidoc new file mode 100644 index 000000000..1d6946015 --- /dev/null +++ b/docs/doc_examples/3a4953663a5a3809b692c27446e16b7f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "amazon-bedrock-embeddings", + pipeline: "amazon_bedrock_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a5f2e2313614ea9693545edee22ac43.asciidoc b/docs/doc_examples/3a5f2e2313614ea9693545edee22ac43.asciidoc new file mode 100644 index 000000000..1ed8c9d64 --- /dev/null +++ b/docs/doc_examples/3a5f2e2313614ea9693545edee22ac43.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.deleteServiceToken({ + namespace: "elastic", + service: "fleet-server", + name: "token42", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc b/docs/doc_examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc new file mode 100644 index 000000000..d68abb0d9 --- /dev/null +++ b/docs/doc_examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + post_date: { + type: "date", + }, + user: { + type: "keyword", + }, + name: { + type: "keyword", + }, + age: { + type: "integer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3a64ae799cc03fadbb802794730c23da.asciidoc b/docs/doc_examples/3a64ae799cc03fadbb802794730c23da.asciidoc new file mode 100644 index 000000000..4869ca8f6 --- /dev/null +++ b/docs/doc_examples/3a64ae799cc03fadbb802794730c23da.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example_points", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, 
+}); +console.log(response); + +const response1 = await client.index({ + index: "example_points", + id: 1, + refresh: "true", + document: { + name: "Wind & Wetter, Berlin, Germany", + location: [13.400544, 52.530286], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/3aa0e2d25a51bf5f3f0bda7fd8403bf2.asciidoc b/docs/doc_examples/3aa0e2d25a51bf5f3f0bda7fd8403bf2.asciidoc new file mode 100644 index 000000000..1c6af827d --- /dev/null +++ b/docs/doc_examples/3aa0e2d25a51bf5f3f0bda7fd8403bf2.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + default: { + tokenizer: "whitespace", + filter: ["my_custom_stop_words_filter"], + }, + }, + filter: { + my_custom_stop_words_filter: { + type: "stop", + ignore_case: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc b/docs/doc_examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc new file mode 100644 index 000000000..e498feff3 --- /dev/null +++ b/docs/doc_examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "persian_example", + settings: { + analysis: { + char_filter: { + zero_width_spaces: { + type: "mapping", + mappings: ["\\u200C=>\\u0020"], + }, + }, + filter: { + persian_stop: { + type: "stop", + stopwords: "_persian_", + }, + }, + analyzer: { + rebuilt_persian: { + tokenizer: "standard", + char_filter: ["zero_width_spaces"], + filter: [ + "lowercase", + "decimal_digit", + "arabic_normalization", + "persian_normalization", + "persian_stop", + "persian_stem", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3abedc1d68fe1d20621157406b2b1de0.asciidoc b/docs/doc_examples/3abedc1d68fe1d20621157406b2b1de0.asciidoc new file mode 100644 index 000000000..df7a51c73 --- /dev/null +++ b/docs/doc_examples/3abedc1d68fe1d20621157406b2b1de0.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + filter: ["my_custom_word_delimiter_filter"], + }, + }, + filter: { + my_custom_word_delimiter_filter: { + type: "word_delimiter", + type_table: ["- => ALPHA"], + split_on_case_change: false, + split_on_numerics: false, + stem_english_possessive: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ac075c5b5bbe648d40d06cce3061367.asciidoc b/docs/doc_examples/3ac075c5b5bbe648d40d06cce3061367.asciidoc new file mode 100644 index 000000000..c8b4bd9b1 --- /dev/null +++ b/docs/doc_examples/3ac075c5b5bbe648d40d06cce3061367.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: + '{ "query": { "bool": { "filter": [ {{#year_scope}} { "range": { "@timestamp": { "gte": "now-1y/d", "lt": "now/d" } } }, 
{{/year_scope}} { "term": { "user.id": "{{user_id}}" }}]}}}', + params: { + year_scope: false, + user_id: "kimchy", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc b/docs/doc_examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc new file mode 100644 index 000000000..790356725 --- /dev/null +++ b/docs/doc_examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteJob({ + job_id: "total-requests", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ae03ba3b56e5e287953094050766738.asciidoc b/docs/doc_examples/3ae03ba3b56e5e287953094050766738.asciidoc deleted file mode 100644 index cb67a6047..000000000 --- a/docs/doc_examples/3ae03ba3b56e5e287953094050766738.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.refresh() -console.log(response0) - -const response1 = await client.search({ - index: 'new_twitter', - size: '0', - filter_path: 'hits.total' -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/3af10fde8138d9d95df127d39d9a0ed2.asciidoc b/docs/doc_examples/3af10fde8138d9d95df127d39d9a0ed2.asciidoc new file mode 100644 index 000000000..423dd5ba8 --- /dev/null +++ b/docs/doc_examples/3af10fde8138d9d95df127d39d9a0ed2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.max_shards_per_node": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc b/docs/doc_examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc new file mode 100644 index 000000000..de00013ae --- /dev/null +++ b/docs/doc_examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "serbian_example", + settings: { + analysis: { + filter: { + serbian_stop: { + type: "stop", + stopwords: "_serbian_", + }, + serbian_keywords: { + type: "keyword_marker", + keywords: ["пример"], + }, + serbian_stemmer: { + type: "stemmer", + language: "serbian", + }, + }, + analyzer: { + rebuilt_serbian: { + tokenizer: "standard", + filter: [ + "lowercase", + "serbian_stop", + "serbian_keywords", + "serbian_stemmer", + "serbian_normalization", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3b0475515ee692a2d9850c2bd7cdb895.asciidoc b/docs/doc_examples/3b0475515ee692a2d9850c2bd7cdb895.asciidoc new file mode 100644 index 000000000..0db0a8821 --- /dev/null +++ b/docs/doc_examples/3b0475515ee692a2d9850c2bd7cdb895.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + unindexed_longs: { + match_mapping_type: "long", + mapping: { + type: "long", + index: false, + }, + }, + }, 
+ { + unindexed_doubles: { + match_mapping_type: "double", + mapping: { + type: "float", + index: false, + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3b04cc894e6a47d57983484010feac0c.asciidoc b/docs/doc_examples/3b04cc894e6a47d57983484010feac0c.asciidoc index f9bc52491..fbb6caf31 100644 --- a/docs/doc_examples/3b04cc894e6a47d57983484010feac0c.asciidoc +++ b/docs/doc_examples/3b04cc894e6a47d57983484010feac0c.asciidoc @@ -3,16 +3,15 @@ [source, js] ---- -const response0 = await client.get({ - index: 'metricbeat-2016.05.30-1', - id: '1' -}) -console.log(response0) +const response = await client.get({ + index: "metricbeat-2016.05.30-1", + id: 1, +}); +console.log(response); const response1 = await client.get({ - index: 'metricbeat-2016.05.31-1', - id: '1' -}) -console.log(response1) + index: "metricbeat-2016.05.31-1", + id: 1, +}); +console.log(response1); ---- - diff --git a/docs/doc_examples/3b05128cba6852e79a905bcdd5a8ebc0.asciidoc b/docs/doc_examples/3b05128cba6852e79a905bcdd5a8ebc0.asciidoc new file mode 100644 index 000000000..55b52a825 --- /dev/null +++ b/docs/doc_examples/3b05128cba6852e79a905bcdd5a8ebc0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: "surprise_me", + error_trace: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3b162509ed14eda44a9681cd1108fa39.asciidoc b/docs/doc_examples/3b162509ed14eda44a9681cd1108fa39.asciidoc new file mode 100644 index 000000000..e6170cfad --- /dev/null +++ b/docs/doc_examples/3b162509ed14eda44a9681cd1108fa39.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + suggest: { + text: "noble prize", + simple_phrase: { + phrase: { + field: "title.trigram", + size: 1, + gram_size: 3, + direct_generator: [ + { + field: "title.trigram", + suggest_mode: "always", + }, + ], + highlight: { + pre_tag: "<em>", + post_tag: "</em>", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc b/docs/doc_examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc new file mode 100644 index 000000000..1902fe423 --- /dev/null +++ b/docs/doc_examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getPrivileges({ + application: "myapp", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc b/docs/doc_examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc new file mode 100644 index 000000000..d24d0f706 --- /dev/null +++ b/docs/doc_examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc @@ -0,0 +1,87 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example-index", + mappings: { + properties: { + text: { + type: "text", + }, + vector: { + type: "dense_vector", + dims: 1, + index: true, + similarity: "l2_norm", + index_options: { + type: "hnsw", + }, + }, + integer: { + type: "integer", + }, +
}, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "example-index", + id: 1, + document: { + text: "rrf", + vector: [5], + integer: 1, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "example-index", + id: 2, + document: { + text: "rrf rrf", + vector: [4], + integer: 2, + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "example-index", + id: 3, + document: { + text: "rrf rrf rrf", + vector: [3], + integer: 1, + }, +}); +console.log(response3); + +const response4 = await client.index({ + index: "example-index", + id: 4, + document: { + text: "rrf rrf rrf rrf", + integer: 2, + }, +}); +console.log(response4); + +const response5 = await client.index({ + index: "example-index", + id: 5, + document: { + vector: [0], + integer: 1, + }, +}); +console.log(response5); + +const response6 = await client.indices.refresh({ + index: "example-index", +}); +console.log(response6); +---- diff --git a/docs/doc_examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc b/docs/doc_examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc new file mode 100644 index 000000000..3760c9f46 --- /dev/null +++ b/docs/doc_examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getIndexTemplate(); +console.log(response); +---- diff --git a/docs/doc_examples/3b606631284877f9bca15051630995ad.asciidoc b/docs/doc_examples/3b606631284877f9bca15051630995ad.asciidoc new file mode 100644 index 000000000..e355ac371 --- /dev/null +++ b/docs/doc_examples/3b606631284877f9bca15051630995ad.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_test_scores", + query: { + term: { + grad_year: "2099", + }, + }, + sort: [ + { + _script: { + type: "number", + script: { + source: "doc['math_score'].value + doc['verbal_score'].value", + }, + order: "desc", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc b/docs/doc_examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc new file mode 100644 index 000000000..e27e26bb2 --- /dev/null +++ b/docs/doc_examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_security/cross_cluster/api_key/VuaCfGcBCdbkQm-e5aOx", + body: { + access: { + replication: [ + { + names: ["archive"], + }, + ], + }, + metadata: { + application: "replication", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3b8ab7027e0d616fb432acd8813e086c.asciidoc b/docs/doc_examples/3b8ab7027e0d616fb432acd8813e086c.asciidoc new file mode 100644 index 000000000..2170911ae --- /dev/null +++ b/docs/doc_examples/3b8ab7027e0d616fb432acd8813e086c.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search 
HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3b9c54604535d97e8368d47148aecc6f.asciidoc b/docs/doc_examples/3b9c54604535d97e8368d47148aecc6f.asciidoc new file mode 100644 index 000000000..819faa5c7 --- /dev/null +++ b/docs/doc_examples/3b9c54604535d97e8368d47148aecc6f.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateModelSnapshot({ + job_id: "it_ops_new_logs", + snapshot_id: 1491852978, + description: "Snapshot 1", + retain: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ba2896bcc724c27be8f0decf6f81813.asciidoc b/docs/doc_examples/3ba2896bcc724c27be8f0decf6f81813.asciidoc new file mode 100644 index 000000000..453fb5d54 --- /dev/null +++ b/docs/doc_examples/3ba2896bcc724c27be8f0decf6f81813.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putTemplate({ + name: "custom_monitoring", + index_patterns: [ + ".monitoring-beats-7-*", + ".monitoring-es-7-*", + ".monitoring-kibana-7-*", + ".monitoring-logstash-7-*", + ], + order: 1, + settings: { + number_of_shards: 5, + number_of_replicas: 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc b/docs/doc_examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc new file mode 100644 index 000000000..93e5127ef --- /dev/null +++ b/docs/doc_examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + }, + script: { + source: 'ctx._source.tag = ctx._source.remove("flag")', + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3bb5951a9e1186af5d154f56ffc13502.asciidoc b/docs/doc_examples/3bb5951a9e1186af5d154f56ffc13502.asciidoc new file mode 100644 index 000000000..75cd3cb43 --- /dev/null +++ b/docs/doc_examples/3bb5951a9e1186af5d154f56ffc13502.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + message: { + type: "keyword", + ignore_above: 20, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + message: "Syntax error", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + message: "Syntax error with some long stacktrace", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + aggs: { + messages: { + terms: { + field: "message", + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc b/docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc new file mode 100644 index 000000000..929ab0ee8 --- /dev/null +++ b/docs/doc_examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc @@ -0,0 +1,61 @@ 
+// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + linear: { + retrievers: [ + { + retriever: { + standard: { + query: { + function_score: { + query: { + term: { + topic: "ai", + }, + }, + functions: [ + { + script_score: { + script: { + source: "doc['timestamp'].value.millis", + }, + }, + }, + ], + boost_mode: "replace", + }, + }, + sort: { + timestamp: { + order: "asc", + }, + }, + }, + }, + weight: 2, + normalizer: "minmax", + }, + { + retriever: { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + weight: 1.5, + }, + ], + rank_window_size: 10, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc b/docs/doc_examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc new file mode 100644 index 000000000..d1794a5b7 --- /dev/null +++ b/docs/doc_examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index_double", + mappings: { + properties: { + field: { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc b/docs/doc_examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc new file mode 100644 index 000000000..d31648cf2 --- /dev/null +++ b/docs/doc_examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.clone({ + repository: "my_repository", + snapshot: "source_snapshot", + target_snapshot: "target_snapshot", + indices: "index_a,index_b", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3c04f75bcbb07125d51b21b9b2c9f6f0.asciidoc b/docs/doc_examples/3c04f75bcbb07125d51b21b9b2c9f6f0.asciidoc new file mode 100644 index 000000000..0a22cb911 --- /dev/null +++ b/docs/doc_examples/3c04f75bcbb07125d51b21b9b2c9f6f0.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "index_1", + id: 1, + document: { + text: "Document in index 1", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "index_2", + id: 2, + refresh: "true", + document: { + text: "Document in index 2", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "index_1,index_2", + query: { + terms: { + _index: ["index_1", "index_2"], + }, + }, + aggs: { + indices: { + terms: { + field: "_index", + size: 10, + }, + }, + }, + sort: [ + { + _index: { + order: "asc", + }, + }, + ], + script_fields: { + index_name: { + script: { + lang: "painless", + source: "doc['_index']", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/3c09ca91057216125ed0e3856a91ff95.asciidoc b/docs/doc_examples/3c09ca91057216125ed0e3856a91ff95.asciidoc new file mode 100644 index 000000000..1d1a318d3 --- /dev/null +++ b/docs/doc_examples/3c09ca91057216125ed0e3856a91ff95.asciidoc @@ -0,0 +1,149 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "datastream_template", + index_patterns: ["datastream*"], + data_stream: {}, + template: { + settings: { + index: { + mode: "time_series", + number_of_replicas: 0, + number_of_shards: 2, + }, + "index.lifecycle.name": "datastream_policy", + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + kubernetes: { + properties: { + container: { + properties: { + cpu: { + properties: { + usage: { + properties: { + core: { + properties: { + ns: { + type: "long", + }, + }, + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + nanocores: { + type: "long", + time_series_metric: "gauge", + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + }, + }, + memory: { + properties: { + available: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + majorpagefaults: { + type: "long", + }, + pagefaults: { + type: "long", + time_series_metric: "gauge", + }, + rss: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + usage: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + workingset: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + }, + }, + name: { + type: "keyword", + }, + start_time: { + type: "date", + }, + }, + }, + host: { + type: "keyword", + time_series_dimension: true, + }, + namespace: { + type: "keyword", + time_series_dimension: true, + }, + node: { + type: "keyword", + time_series_dimension: true, + }, + pod: { + type: "keyword", + time_series_dimension: true, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc b/docs/doc_examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc new file mode 100644 index 000000000..d2b00a583 --- /dev/null +++ b/docs/doc_examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "alibabacloud_ai_search_embeddings", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + api_key: "<api_key>", + service_id: "<service_id>", + host: "<host>", + workspace: "<workspace>", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc b/docs/doc_examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc new file mode 100644 index 000000000..0a5371ecb --- /dev/null +++ b/docs/doc_examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "model2", + docs: [ + { + text_field: "", + }, + ], + inference_config: { + question_answering: { + question: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3c36dc17359c6b6b6a40d04da9293fa7.asciidoc b/docs/doc_examples/3c36dc17359c6b6b6a40d04da9293fa7.asciidoc new file mode 100644
index 000000000..e10137efc --- /dev/null +++ b/docs/doc_examples/3c36dc17359c6b6b6a40d04da9293fa7.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.unweightedAvg(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3c5d5a5c34a62724942329658c688f5e.asciidoc b/docs/doc_examples/3c5d5a5c34a62724942329658c688f5e.asciidoc new file mode 100644 index 000000000..8c8e80a33 --- /dev/null +++ b/docs/doc_examples/3c5d5a5c34a62724942329658c688f5e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.setUpgradeMode({ + enabled: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3c65cb58e131ef46f4dd081683b970ac.asciidoc b/docs/doc_examples/3c65cb58e131ef46f4dd081683b970ac.asciidoc new file mode 100644 index 000000000..49abf03b9 --- /dev/null +++ b/docs/doc_examples/3c65cb58e131ef46f4dd081683b970ac.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations,my_geoshapes", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "200km", + "pin.location": { + lat: 40, + lon: -70, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc b/docs/doc_examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc new file mode 100644 index 000000000..5ee49acac --- /dev/null +++ b/docs/doc_examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shrink({ + index: "my-index-000001", + target: "shrunk-my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3c7621a81fa982b79f040a6d2611530e.asciidoc b/docs/doc_examples/3c7621a81fa982b79f040a6d2611530e.asciidoc new file mode 100644 index 000000000..b299375ef --- /dev/null +++ b/docs/doc_examples/3c7621a81fa982b79f040a6d2611530e.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "ct1", + template: { + settings: { + "index.number_of_shards": 2, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "ct2", + template: { + settings: { + "index.number_of_replicas": 0, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.putIndexTemplate({ + name: "final-template", + index_patterns: ["my-index-*"], + composed_of: ["ct1", "ct2"], + priority: 5, +}); +console.log(response2); + +const response3 = 
await client.indices.simulateTemplate({ + name: "final-template", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/3cd2f7f9096a8e8180f27b6c30e71840.asciidoc b/docs/doc_examples/3cd2f7f9096a8e8180f27b6c30e71840.asciidoc new file mode 100644 index 000000000..a6a7c4806 --- /dev/null +++ b/docs/doc_examples/3cd2f7f9096a8e8180f27b6c30e71840.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "logs", + size: 0, + aggs: { + messages: { + filters: { + filters: [ + { + match: { + body: "error", + }, + }, + { + match: { + body: "warning", + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3cd50a789b8e1f0ebbbc53a8d7ecf656.asciidoc b/docs/doc_examples/3cd50a789b8e1f0ebbbc53a8d7ecf656.asciidoc deleted file mode 100644 index dd63318fe..000000000 --- a/docs/doc_examples/3cd50a789b8e1f0ebbbc53a8d7ecf656.asciidoc +++ /dev/null @@ -1,38 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - bool: { - should: [ - { - multi_match: { - query: 'Will Smith', - type: 'cross_fields', - fields: [ - 'first', - 'last' - ], - minimum_should_match: '50%' - } - }, - { - multi_match: { - query: 'Will Smith', - type: 'cross_fields', - fields: [ - '*.edge' - ] - } - } - ] - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/3cd93a48906069709b76420c66930c01.asciidoc b/docs/doc_examples/3cd93a48906069709b76420c66930c01.asciidoc new file mode 100644 index 000000000..ab7d04c7b --- /dev/null +++ b/docs/doc_examples/3cd93a48906069709b76420c66930c01.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["lowercase", "my_stemmer"], + }, + }, + filter: { + my_stemmer: { + type: "stemmer", + language: "light_german", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3d05fa99ba8e1f2c3f3dfe59e4ee60f6.asciidoc b/docs/doc_examples/3d05fa99ba8e1f2c3f3dfe59e4ee60f6.asciidoc new file mode 100644 index 000000000..c538c58f4 --- /dev/null +++ b/docs/doc_examples/3d05fa99ba8e1f2c3f3dfe59e4ee60f6.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + content: "kimchy", + }, + }, + highlight: { + fields: { + content: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3d1a0e1dc5310544d032108ae0b3f099.asciidoc b/docs/doc_examples/3d1a0e1dc5310544d032108ae0b3f099.asciidoc new file mode 100644 index 000000000..4d3535af2 --- /dev/null +++ b/docs/doc_examples/3d1a0e1dc5310544d032108ae0b3f099.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_all: { + boost: 1.2, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc b/docs/doc_examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc index d770d86e8..db267ffc5 100644 --- a/docs/doc_examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc +++ b/docs/doc_examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc @@ -3,7 +3,6 @@ [source, js] ---- -const response = await client.info() -console.log(response) +const response = await client.info(); +console.log(response); ---- - diff --git a/docs/doc_examples/3d316bddd8503a6cc10566630a4155d3.asciidoc b/docs/doc_examples/3d316bddd8503a6cc10566630a4155d3.asciidoc new file mode 100644 index 000000000..44e3eb04e --- /dev/null +++ b/docs/doc_examples/3d316bddd8503a6cc10566630a4155d3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_watcher/settings", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3d48d1ba49f680aac32177d653944623.asciidoc b/docs/doc_examples/3d48d1ba49f680aac32177d653944623.asciidoc new file mode 100644 index 000000000..c3efc69f2 --- /dev/null +++ b/docs/doc_examples/3d48d1ba49f680aac32177d653944623.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.ackWatch({ + watch_id: "<watch_id>", + action_id: "<action_id>", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3d6935e04de21ab2f103e5b61cfd7a5b.asciidoc b/docs/doc_examples/3d6935e04de21ab2f103e5b61cfd7a5b.asciidoc new file mode 100644 index 000000000..df578de4c --- /dev/null +++ b/docs/doc_examples/3d6935e04de21ab2f103e5b61cfd7a5b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + rename: { + description: "Rename 'provider' to 'cloud.provider'", + field: "provider", + target_field: "cloud.provider", + ignore_failure: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc b/docs/doc_examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc new file mode 100644 index 000000000..1984895ae --- /dev/null +++ b/docs/doc_examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.usage(); +console.log(response); +---- diff --git a/docs/doc_examples/3d82257167e8a14a7f474848b32da128.asciidoc b/docs/doc_examples/3d82257167e8a14a7f474848b32da128.asciidoc new file mode 100644 index 000000000..c350ee339 --- /dev/null +++ b/docs/doc_examples/3d82257167e8a14a7f474848b32da128.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "set_bar", + description: "sets the value of bar from the field foo", + processors: [ + { + set: { + field: "bar", + copy_from: "foo", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.ingest.simulate({ + id: "set_bar", + docs: [ + { + _source: { + foo: ["foo1", "foo2"],
+ }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc b/docs/doc_examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc new file mode 100644 index 000000000..bc5509871 --- /dev/null +++ b/docs/doc_examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.exclude._name": "target-node-name", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc b/docs/doc_examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc new file mode 100644 index 000000000..d0263a6ed --- /dev/null +++ b/docs/doc_examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.delete({ + index: "my-index-000001", + id: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3dd45f65e7bfe207e8d796118f25613c.asciidoc b/docs/doc_examples/3dd45f65e7bfe207e8d796118f25613c.asciidoc new file mode 100644 index 000000000..0ba906074 --- /dev/null +++ b/docs/doc_examples/3dd45f65e7bfe207e8d796118f25613c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + flat_settings: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc b/docs/doc_examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc new file mode 100644 index 000000000..507bb9271 --- /dev/null +++ b/docs/doc_examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key", + role_descriptors: { + "role-a": { + cluster: ["all"], + indices: [ + { + names: ["index-a*"], + privileges: ["read"], + }, + ], + }, + }, + metadata: { + application: "my-application", + environment: { + level: 1, + trusted: true, + tags: ["dev", "staging"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3e13c8a81f40a537eddc0b57633b45f8.asciidoc b/docs/doc_examples/3e13c8a81f40a537eddc0b57633b45f8.asciidoc new file mode 100644 index 000000000..b200ad760 --- /dev/null +++ b/docs/doc_examples/3e13c8a81f40a537eddc0b57633b45f8.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "test_index", + analyzer: "my_analyzer", + text: "missing bicycles", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3e1cb34fd6e510c79c2fff2126ac1c61.asciidoc b/docs/doc_examples/3e1cb34fd6e510c79c2fff2126ac1c61.asciidoc new file mode 100644 index 000000000..c8090df78 --- /dev/null +++ b/docs/doc_examples/3e1cb34fd6e510c79c2fff2126ac1c61.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.indices.create({ + index: "my-index-000001", + mappings: { + _meta: { + class: "MyApp::User", + version: { + min: "1.0", + max: "1.3", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc b/docs/doc_examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc new file mode 100644 index 000000000..efcd814b2 --- /dev/null +++ b/docs/doc_examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + fields: { + comment: { + fragment_size: 150, + number_of_fragments: 3, + no_match_size: 150, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc b/docs/doc_examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc new file mode 100644 index 000000000..0c54623e0 --- /dev/null +++ b/docs/doc_examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", + doc: { + fullname: "John Doe", + text: "test test test", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3e4227250d49e81df48773f8ba803ea7.asciidoc b/docs/doc_examples/3e4227250d49e81df48773f8ba803ea7.asciidoc new file mode 100644 index 000000000..1cc732f90 --- /dev/null +++ b/docs/doc_examples/3e4227250d49e81df48773f8ba803ea7.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-data-stream", + properties: { + message: { + type: "text", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3e573bfabe00f8bfb8bb69aa5820768e.asciidoc b/docs/doc_examples/3e573bfabe00f8bfb8bb69aa5820768e.asciidoc deleted file mode 100644 index a7e633b13..000000000 --- a/docs/doc_examples/3e573bfabe00f8bfb8bb69aa5820768e.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.refresh() -console.log(response0) - -const response1 = await client.search({ - index: 'twitter', - size: '0', - filter_path: 'hits.total', - body: { - query: { - range: { - likes: { - lt: 10 - } - } - } - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/3e6db3d80439c2c176dbd1bb1296b6cf.asciidoc b/docs/doc_examples/3e6db3d80439c2c176dbd1bb1296b6cf.asciidoc new file mode 100644 index 000000000..d1a11e0fb --- /dev/null +++ b/docs/doc_examples/3e6db3d80439c2c176dbd1bb1296b6cf.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + id: "my-search-template", + params: { + query_string: "hello world", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc b/docs/doc_examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc new file mode 100644 index 
000000000..2d1bd3666 --- /dev/null +++ b/docs/doc_examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ea33023474e77d73ac0540e3a02b0b2.asciidoc b/docs/doc_examples/3ea33023474e77d73ac0540e3a02b0b2.asciidoc new file mode 100644 index 000000000..6b705d888 --- /dev/null +++ b/docs/doc_examples/3ea33023474e77d73ac0540e3a02b0b2.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "basic_users", + roles: ["user"], + rules: { + any: [ + { + field: { + dn: "cn=John Doe,cn=contractors,dc=example,dc=com", + }, + }, + { + field: { + groups: "cn=users,dc=example,dc=com", + }, + }, + ], + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc b/docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc new file mode 100644 index 000000000..32f004a99 --- /dev/null +++ b/docs/doc_examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + remove_index: { + index: "my-index-2099.05.06-000001", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3eb4cdd4a799a117ac1ff5f02b18a512.asciidoc b/docs/doc_examples/3eb4cdd4a799a117ac1ff5f02b18a512.asciidoc new file mode 100644 index 000000000..b934334fb --- /dev/null +++ b/docs/doc_examples/3eb4cdd4a799a117ac1ff5f02b18a512.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + mappings: { + properties: { + query: { + type: "percolator", + }, + body: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "index", + alias: "queries", + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.index({ + index: "queries", + id: 1, + refresh: "true", + document: { + query: { + match: { + body: "quick brown fox", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/3ec95ba697ff97ee2d1a721a393b5926.asciidoc b/docs/doc_examples/3ec95ba697ff97ee2d1a721a393b5926.asciidoc new file mode 100644 index 000000000..136b2aead --- /dev/null +++ b/docs/doc_examples/3ec95ba697ff97ee2d1a721a393b5926.asciidoc @@ -0,0 +1,70 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase"], + }, + my_stop_analyzer: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "english_stop"], + }, + }, + filter: { + english_stop: { + type: "stop", + stopwords: "_english_", + }, + }, + 
}, + }, + mappings: { + properties: { + title: { + type: "text", + analyzer: "my_analyzer", + search_analyzer: "my_stop_analyzer", + search_quote_analyzer: "my_analyzer", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + title: "The Quick Brown Fox", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + title: "A Quick Brown Fox", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + query_string: { + query: '"the quick brown fox"', + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/3eca58ef7592b3a857ea3a9898de5997.asciidoc b/docs/doc_examples/3eca58ef7592b3a857ea3a9898de5997.asciidoc new file mode 100644 index 000000000..59b02055a --- /dev/null +++ b/docs/doc_examples/3eca58ef7592b3a857ea3a9898de5997.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "zoomed-in": { + filter: { + geo_bounding_box: { + location: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + aggregations: { + zoom1: { + geohash_grid: { + field: "location", + precision: 8, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ed39eb60fbfafb70f7825b8d103bf17.asciidoc b/docs/doc_examples/3ed39eb60fbfafb70f7825b8d103bf17.asciidoc new file mode 100644 index 000000000..df4254b01 --- /dev/null +++ b/docs/doc_examples/3ed39eb60fbfafb70f7825b8d103bf17.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "200km", + "pin.location": { + lat: 40, + lon: -70, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc b/docs/doc_examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc new file mode 100644 index 000000000..710fd7bb1 --- /dev/null +++ b/docs/doc_examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.stats({ + metric: "current_watches", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ee232bcb2281a12b33cd9764ee4081a.asciidoc b/docs/doc_examples/3ee232bcb2281a12b33cd9764ee4081a.asciidoc new file mode 100644 index 000000000..cca5fd082 --- /dev/null +++ b/docs/doc_examples/3ee232bcb2281a12b33cd9764ee4081a.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "geohex2shape", + description: "translate H3 cell to polygon with enriched fields", + processors: [ + { + geo_grid: { + description: + "Ingest H3 cells like '811fbffffffffff' and create polygons", + field: "geocell", + tile_type: "geohex", + target_format: "wkt", + target_field: "shape", + parent_field: "parent", + 
children_field: "children", + non_children_field: "nonChildren", + precision_field: "precision", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc new file mode 100644 index 000000000..be6f3596c --- /dev/null +++ b/docs/doc_examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQuery({ + query: + "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", + wait_for_completion_timeout: "2s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f20459d358611793272f63dc596e889.asciidoc b/docs/doc_examples/3f20459d358611793272f63dc596e889.asciidoc new file mode 100644 index 000000000..c952456cb --- /dev/null +++ b/docs/doc_examples/3f20459d358611793272f63dc596e889.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "news", + query: { + match: { + custom_all: "elasticsearch", + }, + }, + aggs: { + tags: { + significant_text: { + field: "custom_all", + source_fields: ["content", "title"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f292a5f67e20f91bf18f5c2412a07bf.asciidoc b/docs/doc_examples/3f292a5f67e20f91bf18f5c2412a07bf.asciidoc new file mode 100644 index 000000000..7b90f06cb --- /dev/null +++ b/docs/doc_examples/3f292a5f67e20f91bf18f5c2412a07bf.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "user_lookup", + processors: [ + { + enrich: { + description: "Add 'user' data based on 'email'", + policy_name: "users-policy", + field: "email", + target_field: "user", + max_matches: "1", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc b/docs/doc_examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc new file mode 100644 index 000000000..cf6730b6b --- /dev/null +++ b/docs/doc_examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + cold: { + actions: { + searchable_snapshot: { + snapshot_repository: "backing_repo", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f30310cc6d0adae6b0f61705624a695.asciidoc b/docs/doc_examples/3f30310cc6d0adae6b0f61705624a695.asciidoc new file mode 100644 index 000000000..3852f54b1 --- /dev/null +++ b/docs/doc_examples/3f30310cc6d0adae6b0f61705624a695.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.create({ + repository: "my_repository", + snapshot: "snapshot_2", + wait_for_completion: "true", + indices: "index_1,index_2", + ignore_unavailable: true, + 
include_global_state: false, + metadata: { + taken_by: "user123", + taken_because: "backup before upgrading", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f3b3e207f79303ce6f86e03e928e062.asciidoc b/docs/doc_examples/3f3b3e207f79303ce6f86e03e928e062.asciidoc deleted file mode 100644 index 828854a64..000000000 --- a/docs/doc_examples/3f3b3e207f79303ce6f86e03e928e062.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'customer', - id: '1' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/3f5b5bee692e7d4b0992dc0a64e95a60.asciidoc b/docs/doc_examples/3f5b5bee692e7d4b0992dc0a64e95a60.asciidoc new file mode 100644 index 000000000..5ba9a4ede --- /dev/null +++ b/docs/doc_examples/3f5b5bee692e7d4b0992dc0a64e95a60.asciidoc @@ -0,0 +1,62 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + my_join_field: { + type: "join", + relations: { + my_parent: "my_child", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + number: 1, + my_join_field: "my_parent", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "test", + id: 2, + routing: 1, + refresh: "true", + document: { + number: 1, + my_join_field: { + name: "my_child", + parent: "1", + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "test", + query: { + has_child: { + type: "my_child", + query: { + match: { + number: 1, + }, + }, + inner_hits: {}, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/3f60a892bed18151b7baac6cc712576a.asciidoc b/docs/doc_examples/3f60a892bed18151b7baac6cc712576a.asciidoc new file mode 100644 index 000000000..09d1caed1 --- /dev/null +++ b/docs/doc_examples/3f60a892bed18151b7baac6cc712576a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "whitespace", + filter: ["lowercase", "kstem"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f669878713a14dfba251c7ce74dd5c4.asciidoc b/docs/doc_examples/3f669878713a14dfba251c7ce74dd5c4.asciidoc new file mode 100644 index 000000000..0644c5101 --- /dev/null +++ b/docs/doc_examples/3f669878713a14dfba251c7ce74dd5c4.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + }, + pivot: { + group_by: { + customer_id: { + terms: { + field: "customer_id", + }, + }, + }, + aggregations: { + last: { + top_metrics: { + metrics: [ + { + field: "email", + }, + { + field: "customer_first_name.keyword", + }, + { + field: "customer_last_name.keyword", + }, + ], + sort: { + order_date: "desc", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/3f8dc309b63fa0437898107b0d964217.asciidoc b/docs/doc_examples/3f8dc309b63fa0437898107b0d964217.asciidoc new file mode 100644 index 000000000..8fe654c50 --- /dev/null +++ b/docs/doc_examples/3f8dc309b63fa0437898107b0d964217.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.mlJobs({ + h: "id,s,dpr,mb", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f94ed945ae6416a0eb372c2db14d7e0.asciidoc b/docs/doc_examples/3f94ed945ae6416a0eb372c2db14d7e0.asciidoc new file mode 100644 index 000000000..319fc75e4 --- /dev/null +++ b/docs/doc_examples/3f94ed945ae6416a0eb372c2db14d7e0.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + simple_query_string: { + fields: ["body.exact"], + query: "ski", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc b/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc new file mode 100644 index 000000000..7818a3f0c --- /dev/null +++ b/docs/doc_examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + order_stats: { + stats: { + field: "taxful_total_price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3faec4ca15d8c2fbbd16781b1c8693d6.asciidoc b/docs/doc_examples/3faec4ca15d8c2fbbd16781b1c8693d6.asciidoc new file mode 100644 index 000000000..5399ba1e1 --- /dev/null +++ b/docs/doc_examples/3faec4ca15d8c2fbbd16781b1c8693d6.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "mistral-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "mistral_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3faf5e2873de340acfe0a617017db784.asciidoc b/docs/doc_examples/3faf5e2873de340acfe0a617017db784.asciidoc new file mode 100644 index 000000000..682d5836d --- /dev/null +++ b/docs/doc_examples/3faf5e2873de340acfe0a617017db784.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + query: "(content:this OR name:this) AND (content:that OR name:that)", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fb1289c80a354da66693bfb25d7b412.asciidoc b/docs/doc_examples/3fb1289c80a354da66693bfb25d7b412.asciidoc new file mode 100644 index 000000000..d79d59283 --- /dev/null +++ b/docs/doc_examples/3fb1289c80a354da66693bfb25d7b412.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "nightly-snapshots", + schedule: "0 30 2 * * ?", + name: "<nightly-snap-{now/d}>", + repository: "my_repository", + config: { + include_global_state: false, + indices: "*", + }, + retention: { + expire_after: "30d", + min_count: 5, + max_count: 50, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fb2f41ad229a31ad3ae408cc50cbed5.asciidoc b/docs/doc_examples/3fb2f41ad229a31ad3ae408cc50cbed5.asciidoc new file mode 100644 index 000000000..46d564862 --- /dev/null +++ b/docs/doc_examples/3fb2f41ad229a31ad3ae408cc50cbed5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + timeout: "2s", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fe0fb38f75d2a34fb1e6ac9bedbcdbc.asciidoc b/docs/doc_examples/3fe0fb38f75d2a34fb1e6ac9bedbcdbc.asciidoc new file mode 100644 index 000000000..19244f327 --- /dev/null +++ b/docs/doc_examples/3fe0fb38f75d2a34fb1e6ac9bedbcdbc.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + exists: { + field: "_ignored", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fe4264ace04405989141c43aadfff81.asciidoc b/docs/doc_examples/3fe4264ace04405989141c43aadfff81.asciidoc new file mode 100644 index 000000000..f98271d62 --- /dev/null +++ b/docs/doc_examples/3fe4264ace04405989141c43aadfff81.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "cli_or_drivers_minimal", + cluster: ["cluster:monitor/main"], + indices: [ + { + names: ["test"], + privileges: ["read", "indices:admin/get"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc b/docs/doc_examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc new file mode 100644 index 000000000..c554ea9ec --- /dev/null +++ b/docs/doc_examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.verifyRepository({ + name: "my_repository", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fe79ed63195c5f8018648a5a6d645f6.asciidoc b/docs/doc_examples/3fe79ed63195c5f8018648a5a6d645f6.asciidoc new file mode 100644 index 000000000..9636d7cba --- /dev/null +++ b/docs/doc_examples/3fe79ed63195c5f8018648a5a6d645f6.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000002", + mappings: { + _routing: { + required: true, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000002", + id: 1, + document: { + text: "No routing value provided", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/3fe9006f6c7faea162e43fb250f4da38.asciidoc
b/docs/doc_examples/3fe9006f6c7faea162e43fb250f4da38.asciidoc new file mode 100644 index 000000000..b37e62340 --- /dev/null +++ b/docs/doc_examples/3fe9006f6c7faea162e43fb250f4da38.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + field: "_source.my-long-field", + value: 10, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/3fecd5c6d0c172566da4a54320e1cff3.asciidoc b/docs/doc_examples/3fecd5c6d0c172566da4a54320e1cff3.asciidoc new file mode 100644 index 000000000..64898ae51 --- /dev/null +++ b/docs/doc_examples/3fecd5c6d0c172566da4a54320e1cff3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "dictionary_decompounder", + word_list: ["Donau", "dampf", "meer", "schiff"], + }, + ], + text: "Donaudampfschiff", +}); +console.log(response); +---- diff --git a/docs/doc_examples/3ffe9952786ab258bb6ab928b03148a2.asciidoc b/docs/doc_examples/3ffe9952786ab258bb6ab928b03148a2.asciidoc new file mode 100644 index 000000000..85740bdd2 --- /dev/null +++ b/docs/doc_examples/3ffe9952786ab258bb6ab928b03148a2.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc b/docs/doc_examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc index 5c7696ec7..22dea0b19 100644 --- a/docs/doc_examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc +++ b/docs/doc_examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: 'source', - size: 100 - }, - dest: { - index: 'dest', - routing: '=cat' - } - } -}) -console.log(response) + source: { + index: "source", + size: 100, + }, + dest: { + index: "dest", + routing: "=cat", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/402092585940953420404c2884a47e59.asciidoc b/docs/doc_examples/402092585940953420404c2884a47e59.asciidoc new file mode 100644 index 000000000..3fb2ef01d --- /dev/null +++ b/docs/doc_examples/402092585940953420404c2884a47e59.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + order: "desc", + }, + }, + }, + { + product: { + terms: { + field: "product", + }, + }, + }, + ], + }, + aggregations: { + the_avg: { + avg: { + field: "price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4029af36cb3f8202549017f7378803b4.asciidoc b/docs/doc_examples/4029af36cb3f8202549017f7378803b4.asciidoc new file mode 100644 index 000000000..8f8803735 --- /dev/null +++ b/docs/doc_examples/4029af36cb3f8202549017f7378803b4.asciidoc @@ -0,0 
+1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings(); +console.log(response); +---- diff --git a/docs/doc_examples/4053de806dfd9172167999ce098107c4.asciidoc b/docs/doc_examples/4053de806dfd9172167999ce098107c4.asciidoc new file mode 100644 index 000000000..5ee22552e --- /dev/null +++ b/docs/doc_examples/4053de806dfd9172167999ce098107c4.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + constant_score: { + filter: { + term: { + "user.id": "kimchy", + }, + }, + boost: 1.2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc new file mode 100644 index 000000000..c63439d9c --- /dev/null +++ b/docs/doc_examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQueryGet({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/405ac843a9156d3cab374e199cac87fb.asciidoc b/docs/doc_examples/405ac843a9156d3cab374e199cac87fb.asciidoc new file mode 100644 index 000000000..dc52c27a2 --- /dev/null +++ b/docs/doc_examples/405ac843a9156d3cab374e199cac87fb.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_connector/_sync_job", + body: { + id: "connector-id", + job_type: "full", + trigger_method: "on_demand", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc b/docs/doc_examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc new file mode 100644 index 000000000..fd7235c82 --- /dev/null +++ b/docs/doc_examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getOverallBuckets({ + job_id: "job-*", + top_n: 2, + overall_score: 50, + start: 1403532000000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc b/docs/doc_examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc new file mode 100644 index 000000000..e8aad642f --- /dev/null +++ b/docs/doc_examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteScript({ + id: "calculate-score", +}); +console.log(response); +---- diff --git a/docs/doc_examples/406a0f1c1aac947bcee58f86b6d036c1.asciidoc b/docs/doc_examples/406a0f1c1aac947bcee58f86b6d036c1.asciidoc new file mode 100644 index 000000000..5edb2eea3 --- /dev/null +++ b/docs/doc_examples/406a0f1c1aac947bcee58f86b6d036c1.asciidoc @@ -0,0 +1,64 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_event_watch", + trigger: { + schedule: { + interval: "5m", + }, + }, + input: { + search: { + request: { + indices: "log-events", + body: { + size: 0, + query: { + match: { + status: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 5, + }, + }, + }, + throttle_period: "15m", + actions: { + email_administrator: { + email: { + to: "sys.admino@host.domain", + subject: "Encountered {{ctx.payload.hits.total}} errors", + body: "Too many error in the system, see attached data", + attachments: { + attached_data: { + data: { + format: "json", + }, + }, + }, + priority: "high", + }, + }, + notify_pager: { + webhook: { + method: "POST", + host: "pager.service.domain", + port: 1234, + path: "/{{watch_id}}", + body: "Encountered {{ctx.payload.hits.total}} errors", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/408060f0c52300588a6dee774f4fd6a5.asciidoc b/docs/doc_examples/408060f0c52300588a6dee774f4fd6a5.asciidoc new file mode 100644 index 000000000..0b94860f6 --- /dev/null +++ b/docs/doc_examples/408060f0c52300588a6dee774f4fd6a5.asciidoc @@ -0,0 +1,533 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "datastream", + refresh: "true", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:49:00Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 91153, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 463314616, + }, + usage: { + bytes: 307007078, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 585236, + }, + rss: { + bytes: 102728, + }, + pagefaults: 120901, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:45:50Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 124501, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 982546514, + }, + usage: { + bytes: 360035574, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1339884, + }, + rss: { + bytes: 381174, + }, + pagefaults: 178473, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:50Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 38907, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 862723768, + }, + usage: { + bytes: 379572388, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 431227, + }, + rss: { + 
bytes: 386580, + }, + pagefaults: 233166, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:40Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 86706, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 567160996, + }, + usage: { + bytes: 103266017, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1724908, + }, + rss: { + bytes: 105431, + }, + pagefaults: 233166, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:00Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 150069, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 639054643, + }, + usage: { + bytes: 265142477, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1786511, + }, + rss: { + bytes: 189235, + }, + pagefaults: 138172, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:42:40Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 82260, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 854735585, + }, + usage: { + bytes: 309798052, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 924058, + }, + rss: { + bytes: 110838, + }, + pagefaults: 259073, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:42:10Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 153404, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 279586406, + }, + usage: { + bytes: 214904955, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1047265, + }, + rss: { + bytes: 91914, + }, + pagefaults: 302252, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:40:20Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 125613, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 822782853, + }, + usage: { + bytes: 100475044, + node: { + pct: 
0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 2109932, + }, + rss: { + bytes: 278446, + }, + pagefaults: 74843, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:40:10Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 100046, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 567160996, + }, + usage: { + bytes: 362826547, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1986724, + }, + rss: { + bytes: 402801, + }, + pagefaults: 296495, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:38:30Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 40018, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 1062428344, + }, + usage: { + bytes: 265142477, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 2294743, + }, + rss: { + bytes: 340623, + }, + pagefaults: 224530, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/40a42f005144cfed3dd1dcf2638e8211.asciidoc b/docs/doc_examples/40a42f005144cfed3dd1dcf2638e8211.asciidoc new file mode 100644 index 000000000..d74f6fdc4 --- /dev/null +++ b/docs/doc_examples/40a42f005144cfed3dd1dcf2638e8211.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + field: "price", + operator: "gte", + value: 500, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc b/docs/doc_examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc new file mode 100644 index 000000000..9d9b27d2f --- /dev/null +++ b/docs/doc_examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + constant_score: { + filter: { + percolate: { + field: "query", + document: { + message: "A new bonsai tree in the office", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc b/docs/doc_examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc new file mode 100644 index 000000000..739a8c008 --- /dev/null +++ b/docs/doc_examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.cluster.stats({ + human: "true", + filter_path: "indices.mappings.total_deduplicated_mapping_size*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc b/docs/doc_examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc new file mode 100644 index 000000000..c40e72a18 --- /dev/null +++ b/docs/doc_examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "mv", + mappings: { + properties: { + b: { + type: "long", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "mv", + refresh: "true", + operations: [ + { + index: {}, + }, + { + a: 1, + b: [2, 2, 1], + }, + { + index: {}, + }, + { + a: 2, + b: [1, 1], + }, + ], +}); +console.log(response1); + +const response2 = await client.esql.query({ + query: "FROM mv | EVAL b=TO_STRING(b) | LIMIT 2", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/40d88d4f53343ef663c89ba488ab8001.asciidoc b/docs/doc_examples/40d88d4f53343ef663c89ba488ab8001.asciidoc new file mode 100644 index 000000000..3cc09a3de --- /dev/null +++ b/docs/doc_examples/40d88d4f53343ef663c89ba488ab8001.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "envelope", + coordinates: [ + [1000, 100], + [1001, 100], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/40d90d9dc6f4942bf92d88bfc5a34672.asciidoc b/docs/doc_examples/40d90d9dc6f4942bf92d88bfc5a34672.asciidoc new file mode 100644 index 000000000..3cf2017ee --- /dev/null +++ b/docs/doc_examples/40d90d9dc6f4942bf92d88bfc5a34672.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_bool_prefix: { + message: { + query: "quick brown f", + analyzer: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc b/docs/doc_examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc new file mode 100644 index 000000000..5b94aaeb7 --- /dev/null +++ b/docs/doc_examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "amazon-reviews", + id: 1, + document: { + review_text: + "This product is lifechanging! 
I'm telling all my friends about it.", + review_vector: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc b/docs/doc_examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc new file mode 100644 index 000000000..b1664b187 --- /dev/null +++ b/docs/doc_examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + human: "true", + filter_path: + "nodes.*.name,nodes.*.indices.mappings.total_estimated_overhead*,nodes.*.jvm.mem.heap_max*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4113c57384aa37c58d11579e20c00760.asciidoc b/docs/doc_examples/4113c57384aa37c58d11579e20c00760.asciidoc new file mode 100644 index 000000000..349bd2277 --- /dev/null +++ b/docs/doc_examples/4113c57384aa37c58d11579e20c00760.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 0, + _source: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc b/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc new file mode 100644 index 000000000..81079be09 --- /dev/null +++ b/docs/doc_examples/41175d304e660da2931764f9a4418fd3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updatePipeline({ + connector_id: "my-connector", + pipeline: { + extract_binary_content: true, + name: "my-connector-pipeline", + reduce_whitespace: true, + run_ml_inference: true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc b/docs/doc_examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc new file mode 100644 index 000000000..3d9e94a58 --- /dev/null +++ b/docs/doc_examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.stop(); +console.log(response); +---- diff --git a/docs/doc_examples/412f8238ab5182678f1d8f6383031b11.asciidoc b/docs/doc_examples/412f8238ab5182678f1d8f6383031b11.asciidoc new file mode 100644 index 000000000..242c7984d --- /dev/null +++ b/docs/doc_examples/412f8238ab5182678f1d8f6383031b11.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias({ + index: "my-data-stream", + name: "my-alias", +}); +console.log(response); +---- diff --git a/docs/doc_examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc b/docs/doc_examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc new file mode 100644 index 000000000..0d0213521 --- /dev/null +++ b/docs/doc_examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: 
"my-index-000001", + runtime: { + "http.client.ip": { + type: "ip", + script: + '\n String clientip=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{status} %{size}\').extract(doc["message"].value)?.clientip;\n if (clientip != null) emit(clientip);\n ', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/415b46bc2b7a7b4dcf9a73ac67ea20e9.asciidoc b/docs/doc_examples/415b46bc2b7a7b4dcf9a73ac67ea20e9.asciidoc new file mode 100644 index 000000000..687bf0527 --- /dev/null +++ b/docs/doc_examples/415b46bc2b7a7b4dcf9a73ac67ea20e9.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "circles", + id: 2, + pipeline: "polygonize_circles", + document: { + circle: { + type: "circle", + radius: "40m", + coordinates: [30, 10], + }, + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "circles", + id: 2, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/416a3ba11232d3c078c1c31340cf356f.asciidoc b/docs/doc_examples/416a3ba11232d3c078c1c31340cf356f.asciidoc new file mode 100644 index 000000000..eb2c98ce7 --- /dev/null +++ b/docs/doc_examples/416a3ba11232d3c078c1c31340cf356f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + tags_schema: "styled", + fields: { + comment: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/41ad6077f9c1b8d8fefab6ea1660edcd.asciidoc b/docs/doc_examples/41ad6077f9c1b8d8fefab6ea1660edcd.asciidoc new file mode 100644 index 000000000..f6aaab8ed --- /dev/null +++ b/docs/doc_examples/41ad6077f9c1b8d8fefab6ea1660edcd.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + date: { + type: "date", + format: "yyyy-MM-dd", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc b/docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc new file mode 100644 index 000000000..67b3c97a4 --- /dev/null +++ b/docs/doc_examples/41d24383d29b2808a65258a0a3256e96.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "jinaai-index", + mappings: { + properties: { + content: { + type: "semantic_text", + inference_id: "jinaai-embeddings", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc b/docs/doc_examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc new file mode 100644 index 000000000..401ace1c2 --- /dev/null +++ b/docs/doc_examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + doc: { + name: "new_name", + }, + 
detect_noop: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc b/docs/doc_examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc new file mode 100644 index 000000000..b1a830762 --- /dev/null +++ b/docs/doc_examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my-app", + search_application: { + indices: ["index1", "index2"], + template: { + script: { + source: { + query: { + query_string: { + query: "{{query_string}}", + default_field: "{{default_field}}", + }, + }, + }, + params: { + query_string: "*", + default_field: "*", + }, + }, + dictionary: { + properties: { + query_string: { + type: "string", + }, + default_field: { + type: "string", + enum: ["title", "description"], + }, + additionalProperties: false, + }, + required: ["query_string"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4207219a892339e8f3abe0df8723dd27.asciidoc b/docs/doc_examples/4207219a892339e8f3abe0df8723dd27.asciidoc new file mode 100644 index 000000000..210aca690 --- /dev/null +++ b/docs/doc_examples/4207219a892339e8f3abe0df8723dd27.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.metadata.administrator": "sysadmin@example.com", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc b/docs/doc_examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc new file mode 100644 index 000000000..4ae947494 --- /dev/null +++ b/docs/doc_examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateJob({ + job_id: "low_request_rate", + description: "An updated job", + detectors: { + detector_index: 0, + description: "An updated detector description", + }, + groups: ["kibana_sample_data", "kibana_sample_web_logs"], + model_plot_config: { + enabled: true, + }, + renormalization_window_days: 30, + background_persist_interval: "2h", + model_snapshot_retention_days: 7, + results_retention_days: 60, +}); +console.log(response); +---- diff --git a/docs/doc_examples/424fbf082cd4affb84439abfc916b597.asciidoc b/docs/doc_examples/424fbf082cd4affb84439abfc916b597.asciidoc new file mode 100644 index 000000000..e5488b55b --- /dev/null +++ b/docs/doc_examples/424fbf082cd4affb84439abfc916b597.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.downsample({ + index: "my-time-series-index", + target_index: "my-downsampled-time-series-index", + config: { + fixed_interval: "1d", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc b/docs/doc_examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc new file mode 100644 index 000000000..4987afa62 --- /dev/null +++ b/docs/doc_examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// 
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool({ + v: "true", + s: "t,n", + h: "type,name,node_name,active,queue,rejected,completed", +}); +console.log(response); +---- diff --git a/docs/doc_examples/42744a175125df5be0ef77413bf8f608.asciidoc b/docs/doc_examples/42744a175125df5be0ef77413bf8f608.asciidoc deleted file mode 100644 index 6fc780ecc..000000000 --- a/docs/doc_examples/42744a175125df5be0ef77413bf8f608.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putSettings({ - index: 'twitter', - body: { - index: { - refresh_interval: null - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/4275ecbe4aa68d43a8a7139866610a27.asciidoc b/docs/doc_examples/4275ecbe4aa68d43a8a7139866610a27.asciidoc new file mode 100644 index 000000000..27f37bc36 --- /dev/null +++ b/docs/doc_examples/4275ecbe4aa68d43a8a7139866610a27.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + weighted_grade: { + weighted_avg: { + value: { + field: "grade", + }, + weight: { + field: "weight", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/427f6b5c5376cbf0f71f242a60ca3d9e.asciidoc b/docs/doc_examples/427f6b5c5376cbf0f71f242a60ca3d9e.asciidoc deleted file mode 100644 index a95439de4..000000000 --- a/docs/doc_examples/427f6b5c5376cbf0f71f242a60ca3d9e.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'alias2', - q: 'user:kimchy', - routing: '2,3' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/42ba7c1d13aee91fe6f0a8a42c30eb74.asciidoc b/docs/doc_examples/42ba7c1d13aee91fe6f0a8a42c30eb74.asciidoc new file mode 100644 index 000000000..c45e9cde5 --- /dev/null +++ b/docs/doc_examples/42ba7c1d13aee91fe6f0a8a42c30eb74.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", + lazy: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/42bc7608bb675dd6238e2fecbb758d06.asciidoc b/docs/doc_examples/42bc7608bb675dd6238e2fecbb758d06.asciidoc new file mode 100644 index 000000000..25619920d --- /dev/null +++ b/docs/doc_examples/42bc7608bb675dd6238e2fecbb758d06.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "postal_codes", + id: 1, + refresh: "wait_for", + document: { + location: { + type: "envelope", + coordinates: [ + [13, 53], + [14, 52], + ], + }, + postal_code: "96598", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/42d02087f1c8ab0452ef373079a76843.asciidoc b/docs/doc_examples/42d02087f1c8ab0452ef373079a76843.asciidoc new file mode 100644 index 000000000..5290c3b81 --- /dev/null +++ 
b/docs/doc_examples/42d02087f1c8ab0452ef373079a76843.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "stop", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/42deb4fe32afbe0f94185e256a79c447.asciidoc b/docs/doc_examples/42deb4fe32afbe0f94185e256a79c447.asciidoc new file mode 100644 index 000000000..5c7b26d50 --- /dev/null +++ b/docs/doc_examples/42deb4fe32afbe0f94185e256a79c447.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "stop_example", + settings: { + analysis: { + filter: { + english_stop: { + type: "stop", + stopwords: "_english_", + }, + }, + analyzer: { + rebuilt_stop: { + tokenizer: "lowercase", + filter: ["english_stop"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc b/docs/doc_examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc new file mode 100644 index 000000000..141b7eb25 --- /dev/null +++ b/docs/doc_examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "logs-nginx.access-prod", + alias: "logs", + }, + }, + { + add: { + index: "logs-my_app-default", + alias: "logs", + is_write_index: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc b/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc new file mode 100644 index 000000000..c95b05eac --- /dev/null +++ b/docs/doc_examples/430705509f8367aef92be413f702520b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateStatus({ + connector_id: "my-connector", + status: "needs_configuration", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4310869b97d4224acaa6d66b1e196048.asciidoc b/docs/doc_examples/4310869b97d4224acaa6d66b1e196048.asciidoc new file mode 100644 index 000000000..6eb4a744d --- /dev/null +++ b/docs/doc_examples/4310869b97d4224acaa6d66b1e196048.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + sparse_vector: { + field: "content_embedding", + inference_id: "my-elser-endpoint", + query: "How to avoid muscle soreness after running?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4323f6d224847eccdce59c23e33fda0a.asciidoc b/docs/doc_examples/4323f6d224847eccdce59c23e33fda0a.asciidoc new file mode 100644 index 000000000..e5c3d639c --- /dev/null +++ b/docs/doc_examples/4323f6d224847eccdce59c23e33fda0a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response 
= await client.indices.create({ + index: "cjk_bigram_example", + settings: { + analysis: { + analyzer: { + standard_cjk_bigram: { + tokenizer: "standard", + filter: ["cjk_bigram"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc b/docs/doc_examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc new file mode 100644 index 000000000..a7ccd0173 --- /dev/null +++ b/docs/doc_examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "my-index-2099.05.06-000001", + alias: "my-alias", + search_routing: "1", + index_routing: "2", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc b/docs/doc_examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc new file mode 100644 index 000000000..318e76153 --- /dev/null +++ b/docs/doc_examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.clearScroll({ + scroll_id: [ + "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", + "DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB", + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/435e0d6a7d86e074d572d9671b7b9676.asciidoc b/docs/doc_examples/435e0d6a7d86e074d572d9671b7b9676.asciidoc new file mode 100644 index 000000000..30012027c --- /dev/null +++ b/docs/doc_examples/435e0d6a7d86e074d572d9671b7b9676.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "Polygon", + coordinates: [ + [ + [100, 0], + [101, 0], + [101, 1], + [100, 1], + [100, 0], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/43682666e1abcb14770c99f02eb26a0d.asciidoc b/docs/doc_examples/43682666e1abcb14770c99f02eb26a0d.asciidoc deleted file mode 100644 index 3fccfdec7..000000000 --- a/docs/doc_examples/43682666e1abcb14770c99f02eb26a0d.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: '*', - q: 'user:kimchy' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/43854be6aae61edbea5f9ab988cb4ce5.asciidoc b/docs/doc_examples/43854be6aae61edbea5f9ab988cb4ce5.asciidoc new file mode 100644 index 000000000..b76a70ba1 --- /dev/null +++ b/docs/doc_examples/43854be6aae61edbea5f9ab988cb4ce5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.security.transport.filter.allow": "172.16.0.0/24", + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/43af86de5e49aa06070092fffc138208.asciidoc b/docs/doc_examples/43af86de5e49aa06070092fffc138208.asciidoc deleted file mode 100644 index 85af5a8ba..000000000 --- a/docs/doc_examples/43af86de5e49aa06070092fffc138208.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - bool: { - must_not: { - exists: { - field: 'user' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/43d9e314431336a6f084cea76dfd6489.asciidoc b/docs/doc_examples/43d9e314431336a6f084cea76dfd6489.asciidoc new file mode 100644 index 000000000..bccc8d81f --- /dev/null +++ b/docs/doc_examples/43d9e314431336a6f084cea76dfd6489.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "restaurants", + retriever: { + knn: { + field: "vector", + query_vector: [10, 22, 77], + k: 10, + num_candidates: 10, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/43e86fbaeed068dcc981214338559b5a.asciidoc b/docs/doc_examples/43e86fbaeed068dcc981214338559b5a.asciidoc new file mode 100644 index 000000000..e4327570f --- /dev/null +++ b/docs/doc_examples/43e86fbaeed068dcc981214338559b5a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveCluster({ + name: "my-index-*,cluster*:my-index-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/43f77ddf1ed8106d4f47a12d39df8e3b.asciidoc b/docs/doc_examples/43f77ddf1ed8106d4f47a12d39df8e3b.asciidoc new file mode 100644 index 000000000..e3d7b4c0a --- /dev/null +++ b/docs/doc_examples/43f77ddf1ed8106d4f47a12d39df8e3b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "networks_lookup", + document: { + ip: "10.100.34.1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc b/docs/doc_examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc new file mode 100644 index 000000000..c8b3ac2eb --- /dev/null +++ b/docs/doc_examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.xpack.usage(); +console.log(response); +---- diff --git a/docs/doc_examples/44198781d164a15be633d4469485a544.asciidoc b/docs/doc_examples/44198781d164a15be633d4469485a544.asciidoc new file mode 100644 index 000000000..f3125224a --- /dev/null +++ b/docs/doc_examples/44198781d164a15be633d4469485a544.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-bit-vectors", + query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "dotProduct(params.query_vector, 'my_dense_vector')", + params: { + 
query_vector: [8, 5, -15, 1, -7], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/441be98c597698bb2809372abf086c3e.asciidoc b/docs/doc_examples/441be98c597698bb2809372abf086c3e.asciidoc new file mode 100644 index 000000000..07491e972 --- /dev/null +++ b/docs/doc_examples/441be98c597698bb2809372abf086c3e.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + histogram_titles: { + terms: { + field: "my_text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/441f330f6872f995769db1ce2b9627e2.asciidoc b/docs/doc_examples/441f330f6872f995769db1ce2b9627e2.asciidoc new file mode 100644 index 000000000..7c0193b54 --- /dev/null +++ b/docs/doc_examples/441f330f6872f995769db1ce2b9627e2.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + stored_fields: [], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44231f7cdd5c3a21025861cdef31e355.asciidoc b/docs/doc_examples/44231f7cdd5c3a21025861cdef31e355.asciidoc new file mode 100644 index 000000000..694e5ee8e --- /dev/null +++ b/docs/doc_examples/44231f7cdd5c3a21025861cdef31e355.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shrink({ + index: "my-index", + target: "my-shrunken-index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4427517dcd8ec9997541150cdc11a0de.asciidoc b/docs/doc_examples/4427517dcd8ec9997541150cdc11a0de.asciidoc new file mode 100644 index 000000000..c98321130 --- /dev/null +++ b/docs/doc_examples/4427517dcd8ec9997541150cdc11a0de.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.deleteRepository({ + name: "my-repo", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4435b654994b575ba181ea679871c78c.asciidoc b/docs/doc_examples/4435b654994b575ba181ea679871c78c.asciidoc new file mode 100644 index 000000000..54a3e4134 --- /dev/null +++ b/docs/doc_examples/4435b654994b575ba181ea679871c78c.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44385b61342e20ea05f254015b2b04d7.asciidoc b/docs/doc_examples/44385b61342e20ea05f254015b2b04d7.asciidoc new file mode 100644 index 000000000..fc102c343 --- /dev/null +++ b/docs/doc_examples/44385b61342e20ea05f254015b2b04d7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkDeleteRole({ + names: ["my_admin_role", "my_user_role"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/443dd902f64b3217505c9595839c3b2d.asciidoc 
b/docs/doc_examples/443dd902f64b3217505c9595839c3b2d.asciidoc new file mode 100644 index 000000000..eb32eb4b5 --- /dev/null +++ b/docs/doc_examples/443dd902f64b3217505c9595839c3b2d.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + indices_boost: [ + { + "my-alias": 1.4, + }, + { + "my-index*": 1.3, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc b/docs/doc_examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc new file mode 100644 index 000000000..057b4dad6 --- /dev/null +++ b/docs/doc_examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-weather-sensor-index-template", + index_patterns: ["metrics-weather_sensors-*"], + data_stream: {}, + template: { + settings: { + "index.mode": "time_series", + "index.lifecycle.name": "my-lifecycle-policy", + }, + mappings: { + properties: { + sensor_id: { + type: "keyword", + time_series_dimension: true, + }, + location: { + type: "keyword", + time_series_dimension: true, + }, + temperature: { + type: "half_float", + time_series_metric: "gauge", + }, + humidity: { + type: "half_float", + time_series_metric: "gauge", + }, + "@timestamp": { + type: "date", + }, + }, + }, + }, + priority: 500, + _meta: { + description: "Template for my weather sensor data", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc b/docs/doc_examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc new file mode 100644 index 000000000..c582b4fd4 --- /dev/null +++ b/docs/doc_examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "http.responses": "304", + }, + }, + fields: ["http.client_ip", "timestamp", "http.verb"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/446e8fc8ccfb13bb5ec64e32a5676d18.asciidoc b/docs/doc_examples/446e8fc8ccfb13bb5ec64e32a5676d18.asciidoc new file mode 100644 index 000000000..706cf1e73 --- /dev/null +++ b/docs/doc_examples/446e8fc8ccfb13bb5ec64e32a5676d18.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["elision"], + text: "j’examine près du wharf", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc b/docs/doc_examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc new file mode 100644 index 000000000..11cb7a2ee --- /dev/null +++ b/docs/doc_examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.awareness.attributes": "rack_id", + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/44939997b0f2601f82a93585a879f65a.asciidoc b/docs/doc_examples/44939997b0f2601f82a93585a879f65a.asciidoc new file mode 100644 index 000000000..71cbc6b0b --- /dev/null +++ b/docs/doc_examples/44939997b0f2601f82a93585a879f65a.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "simple_pattern_split", + pattern: "_", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "an_underscored_phrase", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc b/docs/doc_examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc new file mode 100644 index 000000000..36dcdadb5 --- /dev/null +++ b/docs/doc_examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.requests.cache.enable": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc b/docs/doc_examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc new file mode 100644 index 000000000..f6a9a9d5c --- /dev/null +++ b/docs/doc_examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + profile: true, + query: { + match: { + message: "GET /search", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44bca3f17d403517af3616754dc795bb.asciidoc b/docs/doc_examples/44bca3f17d403517af3616754dc795bb.asciidoc new file mode 100644 index 000000000..16f58b881 --- /dev/null +++ b/docs/doc_examples/44bca3f17d403517af3616754dc795bb.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.explain({ + index: "my-index-000001", + id: 0, + query: { + script_score: { + query: { + match: { + message: "elasticsearch", + }, + }, + script: { + source: + "\n long count = doc['count'].value;\n double normalizedCount = count / 10;\n if (explanation != null) {\n explanation.set('normalized count = count / 10 = ' + count + ' / 10 = ' + normalizedCount);\n }\n return normalizedCount;\n ", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc b/docs/doc_examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc new file mode 100644 index 000000000..19c75356f --- /dev/null +++ b/docs/doc_examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + script: { + source: "ctx._source.count++", + lang: 
"painless", + }, + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44db41b8465af951e366da97ade63bc1.asciidoc b/docs/doc_examples/44db41b8465af951e366da97ade63bc1.asciidoc new file mode 100644 index 000000000..7dbba3532 --- /dev/null +++ b/docs/doc_examples/44db41b8465af951e366da97ade63bc1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.reloadSearchAnalyzers({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/44dd65d69267017fa2fb2cffadef40bb.asciidoc b/docs/doc_examples/44dd65d69267017fa2fb2cffadef40bb.asciidoc new file mode 100644 index 000000000..3236cc23f --- /dev/null +++ b/docs/doc_examples/44dd65d69267017fa2fb2cffadef40bb.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + type_and_promoted: { + type: "keyword", + script: "emit(doc['type'].value + ' ' + doc['promoted'].value)", + }, + }, + aggs: { + type_promoted_count: { + cardinality: { + field: "type_and_promoted", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc b/docs/doc_examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc new file mode 100644 index 000000000..22116d55c --- /dev/null +++ b/docs/doc_examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index_double", + mappings: { + properties: { + field: { + type: "double", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/451b441c3311103d0d2bdbab771b26d2.asciidoc b/docs/doc_examples/451b441c3311103d0d2bdbab771b26d2.asciidoc new file mode 100644 index 000000000..7d5446de3 --- /dev/null +++ b/docs/doc_examples/451b441c3311103d0d2bdbab771b26d2.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-search-template", + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "match": {\n {{=( )=}}\n "message": "(query_string)"\n (={{ }}=)\n }\n }\n }\n ', + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/451e7c29b2cf738cfc822f7c175bef56.asciidoc b/docs/doc_examples/451e7c29b2cf738cfc822f7c175bef56.asciidoc new file mode 100644 index 000000000..9e46b4e15 --- /dev/null +++ b/docs/doc_examples/451e7c29b2cf738cfc822f7c175bef56.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-index-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + lifecycle: { + data_retention: "7d", + }, + }, + _meta: { + description: "Template with data stream lifecycle", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4527d9bb12cf738111a188af235d5d4c.asciidoc 
b/docs/doc_examples/4527d9bb12cf738111a188af235d5d4c.asciidoc new file mode 100644 index 000000000..16cc82185 --- /dev/null +++ b/docs/doc_examples/4527d9bb12cf738111a188af235d5d4c.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + runtime_mappings: { + "http.clientip": { + type: "ip", + script: + "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n ", + }, + }, + query: { + match: { + "http.clientip": "40.135.0.0", + }, + }, + fields: ["http.clientip"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/45499ed1824d1d7cb59972580d2344cb.asciidoc b/docs/doc_examples/45499ed1824d1d7cb59972580d2344cb.asciidoc new file mode 100644 index 000000000..d81463bae --- /dev/null +++ b/docs/doc_examples/45499ed1824d1d7cb59972580d2344cb.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_index", + query: { + range: { + my_counter: { + gte: "9223372036854775808", + lte: "18446744073709551615", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/455029c3d66306ad5d48f6dbddaf7324.asciidoc b/docs/doc_examples/455029c3d66306ad5d48f6dbddaf7324.asciidoc new file mode 100644 index 000000000..ead1639ea --- /dev/null +++ b/docs/doc_examples/455029c3d66306ad5d48f6dbddaf7324.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + latency_histo: { + type: "histogram", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + filter_path: "aggregations", + aggs: { + total_latency: { + sum: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/4553e0acb6336687d61eaecc73f517b7.asciidoc b/docs/doc_examples/4553e0acb6336687d61eaecc73f517b7.asciidoc new file mode 100644 index 000000000..803f24081 --- /dev/null +++ b/docs/doc_examples/4553e0acb6336687d61eaecc73f517b7.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + char_filter: ["my_mappings_char_filter"], + }, + }, + char_filter: { + my_mappings_char_filter: { + type: "mapping", + mappings: [":) => _happy_", ":( => _sad_"], + }, + }, + }, + }, +}); +console.log(response); 
+---- diff --git a/docs/doc_examples/45813d971bfa890ffa2f51f3f480cce5.asciidoc b/docs/doc_examples/45813d971bfa890ffa2f51f3f480cce5.asciidoc new file mode 100644 index 000000000..892f6073f --- /dev/null +++ b/docs/doc_examples/45813d971bfa890ffa2f51f3f480cce5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test_index", + query: { + percolate: { + field: "query", + document: { + body: "Bycicles are missing", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc b/docs/doc_examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc new file mode 100644 index 000000000..1773a1bdc --- /dev/null +++ b/docs/doc_examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.snapshots({ + repository: "repo1", + v: "true", + s: "id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc b/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc new file mode 100644 index 000000000..12fb33095 --- /dev/null +++ b/docs/doc_examples/45954b8aaedfed57012be8b6538b0a24.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.chatCompletionUnified({ + inference_id: "openai-completion", + chat_completion_request: { + messages: [ + { + role: "user", + content: [ + { + type: "text", + text: "What's the price of a scarf?", + }, + ], + }, + ], + tools: [ + { + type: "function", + function: { + name: "get_current_price", + description: "Get the current price of an item", + parameters: { + type: "object", + properties: { + item: { + id: "123", + }, + }, + }, + }, + }, + ], + tool_choice: { + type: "function", + function: { + name: "get_current_price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/45b74f1904533fdb37a5a6f3c8f4ec9b.asciidoc b/docs/doc_examples/45b74f1904533fdb37a5a6f3c8f4ec9b.asciidoc new file mode 100644 index 000000000..f992abc2d --- /dev/null +++ b/docs/doc_examples/45b74f1904533fdb37a5a6f3c8f4ec9b.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "edge_ngram", + min_gram: 2, + max_gram: 10, + token_chars: ["letter", "digit"], + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "2 Quick Foxes.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/45c6e54a9c9e08623af96752b4bde346.asciidoc b/docs/doc_examples/45c6e54a9c9e08623af96752b4bde346.asciidoc new file mode 100644 index 000000000..2ab19e442 --- /dev/null +++ b/docs/doc_examples/45c6e54a9c9e08623af96752b4bde346.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to
generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "12km", + "pin.location": "POINT (-70 40)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/45ef5156dbd2d3fd4fd22b8d99f7aad4.asciidoc b/docs/doc_examples/45ef5156dbd2d3fd4fd22b8d99f7aad4.asciidoc new file mode 100644 index 000000000..b7156b497 --- /dev/null +++ b/docs/doc_examples/45ef5156dbd2d3fd4fd22b8d99f7aad4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.enable": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46064e81620162a23e75002a7eeb8b10.asciidoc b/docs/doc_examples/46064e81620162a23e75002a7eeb8b10.asciidoc new file mode 100644 index 000000000..3f6243b38 --- /dev/null +++ b/docs/doc_examples/46064e81620162a23e75002a7eeb8b10.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.moveToStep({ + index: "my-index-000001", + current_step: { + phase: "hot", + action: "complete", + name: "complete", + }, + next_step: { + phase: "warm", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46103fee3cd5f53dc75123def82d52ad.asciidoc b/docs/doc_examples/46103fee3cd5f53dc75123def82d52ad.asciidoc new file mode 100644 index 000000000..9830c6bf9 --- /dev/null +++ b/docs/doc_examples/46103fee3cd5f53dc75123def82d52ad.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + settings: { + "index.refresh_interval": "30s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4646764bf09911fee7d58630c72d3137.asciidoc b/docs/doc_examples/4646764bf09911fee7d58630c72d3137.asciidoc deleted file mode 100644 index 89dd77fe0..000000000 --- a/docs/doc_examples/4646764bf09911fee7d58630c72d3137.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - script: { - id: 'my_script', - params: { - field: 'genre' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/464dffb6a6e24a860223d1c32b232f95.asciidoc b/docs/doc_examples/464dffb6a6e24a860223d1c32b232f95.asciidoc new file mode 100644 index 000000000..617c0423c --- /dev/null +++ b/docs/doc_examples/464dffb6a6e24a860223d1c32b232f95.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + filter: { + my_shingle_filter: { + type: "shingle", + min_shingle_size: 5, + max_shingle_size: 5, + output_unigrams: false, + }, + my_minhash_filter: 
{ + type: "min_hash", + hash_count: 1, + bucket_count: 512, + hash_set_size: 1, + with_rotation: true, + }, + }, + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["my_shingle_filter", "my_minhash_filter"], + }, + }, + }, + }, + mappings: { + properties: { + fingerprint: { + type: "text", + analyzer: "my_analyzer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4659f639d71a54df571260ee5798dbb3.asciidoc b/docs/doc_examples/4659f639d71a54df571260ee5798dbb3.asciidoc new file mode 100644 index 000000000..a5bcaeefe --- /dev/null +++ b/docs/doc_examples/4659f639d71a54df571260ee5798dbb3.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "zoomed-in": { + filter: { + geo_bounding_box: { + location: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + aggregations: { + zoom1: { + geotile_grid: { + field: "location", + precision: 22, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46658f00edc4865dfe472a392374cd0f.asciidoc b/docs/doc_examples/46658f00edc4865dfe472a392374cd0f.asciidoc index f341798d4..5c8453711 100644 --- a/docs/doc_examples/46658f00edc4865dfe472a392374cd0f.asciidoc +++ b/docs/doc_examples/46658f00edc4865dfe472a392374cd0f.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.indices.getTemplate({ - name: 'template_1', - filter_path: '*.version' -}) -console.log(response) + name: "template_1", + filter_path: "*.version", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/4670dd81a9865e07ae74ae8b0266e384.asciidoc b/docs/doc_examples/4670dd81a9865e07ae74ae8b0266e384.asciidoc new file mode 100644 index 000000000..399823687 --- /dev/null +++ b/docs/doc_examples/4670dd81a9865e07ae74ae8b0266e384.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "node_upgrade", + size: 0, + runtime_mappings: { + "startup_time_before.adjusted": { + type: "long", + script: { + source: "emit(doc['startup_time_before'].value - params.adjustment)", + params: { + adjustment: 10, + }, + }, + }, + }, + aggs: { + startup_time_ttest: { + t_test: { + a: { + field: "startup_time_before.adjusted", + }, + b: { + field: "startup_time_after", + }, + type: "paired", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/467833bd44b35a89a7fe0d7df5f253f1.asciidoc b/docs/doc_examples/467833bd44b35a89a7fe0d7df5f253f1.asciidoc new file mode 100644 index 000000000..d2335b434 --- /dev/null +++ b/docs/doc_examples/467833bd44b35a89a7fe0d7df5f253f1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "pattern", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc b/docs/doc_examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc new file mode 100644 index 000000000..f62702896 --- /dev/null +++ b/docs/doc_examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc @@ -0,0 +1,19 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: { + source: + "if (ctx._source.tags.contains(params['tag'])) { ctx._source.tags.remove(ctx._source.tags.indexOf(params['tag'])) }", + lang: "painless", + params: { + tag: "blue", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46a0eaaf5c881f1ba716d1812b36c724.asciidoc b/docs/doc_examples/46a0eaaf5c881f1ba716d1812b36c724.asciidoc new file mode 100644 index 000000000..f830179d0 --- /dev/null +++ b/docs/doc_examples/46a0eaaf5c881f1ba716d1812b36c724.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.putAutoFollowPattern({ + name: "logs-generic-default", + remote_cluster: "clusterB", + leader_index_patterns: [".ds-logs-generic-default-20*"], + leader_index_exclusion_patterns: "*-replicated_from_clustera", + follow_index_pattern: "{{leader_index}}-replicated_from_clusterb", +}); +console.log(response); + +const response1 = await client.ccr.putAutoFollowPattern({ + name: "logs-generic-default", + remote_cluster: "clusterA", + leader_index_patterns: [".ds-logs-generic-default-20*"], + leader_index_exclusion_patterns: "*-replicated_from_clusterb", + follow_index_pattern: "{{leader_index}}-replicated_from_clustera", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc b/docs/doc_examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc new file mode 100644 index 000000000..56d7ce9fd --- /dev/null +++ b/docs/doc_examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.post({ + acknowledge: "true", + licenses: [ + { + uid: "893361dc-9749-4997-93cb-802e3d7fa4xx", + type: "basic", + issue_date_in_millis: 1411948800000, + expiry_date_in_millis: 1914278399999, + max_nodes: 1, + issued_to: "issuedTo", + issuer: "issuer", + signature: "xx", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc b/docs/doc_examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc new file mode 100644 index 000000000..b7d64b4c8 --- /dev/null +++ b/docs/doc_examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMigrateReindexStatus({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/46c4b0dfb674825f9579203d41e7f404.asciidoc b/docs/doc_examples/46c4b0dfb674825f9579203d41e7f404.asciidoc deleted file mode 100644 index af100b302..000000000 --- a/docs/doc_examples/46c4b0dfb674825f9579203d41e7f404.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - tags: { - type: 'keyword' - } - } - } - } -}) -console.log(response) ----- - diff --git 
a/docs/doc_examples/46c5c14f20118dcf519ff6ef21360209.asciidoc b/docs/doc_examples/46c5c14f20118dcf519ff6ef21360209.asciidoc new file mode 100644 index 000000000..af98ccc6c --- /dev/null +++ b/docs/doc_examples/46c5c14f20118dcf519ff6ef21360209.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "datastream_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_docs: 1, + }, + downsample: { + fixed_interval: "1h", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/46ce40227fa60aa6ba435f366b3a1f5f.asciidoc b/docs/doc_examples/46ce40227fa60aa6ba435f366b3a1f5f.asciidoc new file mode 100644 index 000000000..7b9aa0f1b --- /dev/null +++ b/docs/doc_examples/46ce40227fa60aa6ba435f366b3a1f5f.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseFollow({ + index: "kibana_sample_data_ecommerce2", +}); +console.log(response); + +const response1 = await client.indices.close({ + index: "kibana_sample_data_ecommerce2", +}); +console.log(response1); + +const response2 = await client.ccr.unfollow({ + index: "kibana_sample_data_ecommerce2", +}); +console.log(response2); + +const response3 = await client.indices.open({ + index: "kibana_sample_data_ecommerce2", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/46ebd468c3f132a4978088964466c5cd.asciidoc b/docs/doc_examples/46ebd468c3f132a4978088964466c5cd.asciidoc new file mode 100644 index 000000000..731bf1009 --- /dev/null +++ b/docs/doc_examples/46ebd468c3f132a4978088964466c5cd.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "apostrophe_example", + settings: { + analysis: { + analyzer: { + standard_apostrophe: { + tokenizer: "standard", + filter: ["apostrophe"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc b/docs/doc_examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc new file mode 100644 index 000000000..6d000e16c --- /dev/null +++ b/docs/doc_examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["title"], + query: "this that thus", + minimum_should_match: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/473c8ddd4e4b7814a64e5fe40d9d6dca.asciidoc b/docs/doc_examples/473c8ddd4e4b7814a64e5fe40d9d6dca.asciidoc new file mode 100644 index 000000000..e294a84c8 --- /dev/null +++ b/docs/doc_examples/473c8ddd4e4b7814a64e5fe40d9d6dca.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ + task_id: "2j8UKw1bRO283PMwDugNNg:5326", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4752f82fec8b46e5a4b3788b76e3041f.asciidoc 
b/docs/doc_examples/4752f82fec8b46e5a4b3788b76e3041f.asciidoc new file mode 100644 index 000000000..35aa486ba --- /dev/null +++ b/docs/doc_examples/4752f82fec8b46e5a4b3788b76e3041f.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + migrate: { + enabled: false, + }, + allocate: { + include: { + rack_id: "one,two", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/47909e194d10743093f4a22c27a85925.asciidoc b/docs/doc_examples/47909e194d10743093f4a22c27a85925.asciidoc new file mode 100644 index 000000000..2a78ec314 --- /dev/null +++ b/docs/doc_examples/47909e194d10743093f4a22c27a85925.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 10000, + query: { + match: { + "user.id": "elkbee", + }, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + keep_alive: "1m", + }, + sort: [ + { + "@timestamp": { + order: "asc", + format: "strict_date_optional_time_nanos", + numeric_type: "date_nanos", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/47b5ff897f26e9c943cee5c06034181d.asciidoc b/docs/doc_examples/47b5ff897f26e9c943cee5c06034181d.asciidoc deleted file mode 100644 index d2a05c4fe..000000000 --- a/docs/doc_examples/47b5ff897f26e9c943cee5c06034181d.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.delete({ - index: 'twitter', - id: '1', - routing: 'kimchy' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/47bb632c6091ad0cd94bc660bdd309a5.asciidoc b/docs/doc_examples/47bb632c6091ad0cd94bc660bdd309a5.asciidoc deleted file mode 100644 index 7f7456e59..000000000 --- a/docs/doc_examples/47bb632c6091ad0cd94bc660bdd309a5.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - query: { - bool: { - must: [ - { - match: { - age: '40' - } - } - ], - must_not: [ - { - match: { - state: 'ID' - } - } - ] - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/47e6dfb5b09d954c9c0c33fda2b6c66d.asciidoc b/docs/doc_examples/47e6dfb5b09d954c9c0c33fda2b6c66d.asciidoc new file mode 100644 index 000000000..dd6d26ed1 --- /dev/null +++ b/docs/doc_examples/47e6dfb5b09d954c9c0c33fda2b6c66d.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "jacknich", + password: "l0ng-r4nd0m-p@ssw0rd", + roles: ["admin", "other_role1"], + full_name: "Jack Nicholson", + email: "jacknich@example.com", + metadata: { + intelligence: 7, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/47fde7874e15a37242993fd69c62063b.asciidoc 
b/docs/doc_examples/47fde7874e15a37242993fd69c62063b.asciidoc new file mode 100644 index 000000000..4dba02a58 --- /dev/null +++ b/docs/doc_examples/47fde7874e15a37242993fd69c62063b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_ranks: { + percentile_ranks: { + field: "load_time", + values: [500, 600], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc b/docs/doc_examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc new file mode 100644 index 000000000..f3ae3f5f4 --- /dev/null +++ b/docs/doc_examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge(); +console.log(response); +---- diff --git a/docs/doc_examples/4818a1288ac24a56d6d6a4130ee70202.asciidoc b/docs/doc_examples/4818a1288ac24a56d6d6a4130ee70202.asciidoc new file mode 100644 index 000000000..492d8778a --- /dev/null +++ b/docs/doc_examples/4818a1288ac24a56d6d6a4130ee70202.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getScript({ + id: "my-search-template", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4824a823a830a2a5d990eacfd783ac22.asciidoc b/docs/doc_examples/4824a823a830a2a5d990eacfd783ac22.asciidoc new file mode 100644 index 000000000..e74b54883 --- /dev/null +++ b/docs/doc_examples/4824a823a830a2a5d990eacfd783ac22.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + slice: { + id: 0, + max: 2, + }, + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.deleteByQuery({ + index: "my-index-000001", + slice: { + id: 1, + max: 2, + }, + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/48313f620c2871b6f4019b66be730109.asciidoc b/docs/doc_examples/48313f620c2871b6f4019b66be730109.asciidoc new file mode 100644 index 000000000..0d8ee0190 --- /dev/null +++ b/docs/doc_examples/48313f620c2871b6f4019b66be730109.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "shirts", + query: { + bool: { + filter: { + term: { + brand: "gucci", + }, + }, + }, + }, + aggs: { + colors: { + terms: { + field: "color", + }, + }, + color_red: { + filter: { + term: { + color: "red", + }, + }, + aggs: { + models: { + terms: { + field: "model", + }, + }, + }, + }, + }, + post_filter: { + term: { + color: "red", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/483d669ec0768bc4e275a568c6164704.asciidoc b/docs/doc_examples/483d669ec0768bc4e275a568c6164704.asciidoc new file mode 100644 index 000000000..44e105ea9 --- /dev/null +++ 
b/docs/doc_examples/483d669ec0768bc4e275a568c6164704.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseFollow({ + index: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/484e24d1ed1a154ba9753e6090d38d78.asciidoc b/docs/doc_examples/484e24d1ed1a154ba9753e6090d38d78.asciidoc new file mode 100644 index 000000000..ce94875dd --- /dev/null +++ b/docs/doc_examples/484e24d1ed1a154ba9753e6090d38d78.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "point", + coordinates: [-377.03653, 389.897676], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/487f0e07fd83c05f9763e0795c525e2e.asciidoc b/docs/doc_examples/487f0e07fd83c05f9763e0795c525e2e.asciidoc new file mode 100644 index 000000000..4d59de169 --- /dev/null +++ b/docs/doc_examples/487f0e07fd83c05f9763e0795c525e2e.asciidoc @@ -0,0 +1,99 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + my_location: { + type: "geo_point", + }, + group: { + type: "keyword", + }, + "@timestamp": { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "test", + refresh: "true", + operations: [ + { + index: {}, + }, + { + my_location: { + lat: 52.373184, + lon: 4.889187, + }, + "@timestamp": "2023-01-02T09:00:00Z", + }, + { + index: {}, + }, + { + my_location: { + lat: 52.370159, + lon: 4.885057, + }, + "@timestamp": "2023-01-02T10:00:00Z", + }, + { + index: {}, + }, + { + my_location: { + lat: 52.369219, + lon: 4.901618, + }, + "@timestamp": "2023-01-02T13:00:00Z", + }, + { + index: {}, + }, + { + my_location: { + lat: 52.374081, + lon: 4.91235, + }, + "@timestamp": "2023-01-02T16:00:00Z", + }, + { + index: {}, + }, + { + my_location: { + lat: 52.371667, + lon: 4.914722, + }, + "@timestamp": "2023-01-03T12:00:00Z", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + filter_path: "aggregations", + aggs: { + line: { + geo_line: { + point: { + field: "my_location", + }, + sort: { + field: "@timestamp", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/488f6df1df71972392b670ce557f7ff3.asciidoc b/docs/doc_examples/488f6df1df71972392b670ce557f7ff3.asciidoc new file mode 100644 index 000000000..0aabaf773 --- /dev/null +++ b/docs/doc_examples/488f6df1df71972392b670ce557f7ff3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putTemplate({ + name: "template_1", + index_patterns: ["my-index-*"], + order: 0, + settings: { + number_of_shards: 1, + }, + version: 123, +}); +console.log(response); +---- diff --git a/docs/doc_examples/48d9697a14dfe131325521f48a7adc84.asciidoc b/docs/doc_examples/48d9697a14dfe131325521f48a7adc84.asciidoc new file mode 100644 index 000000000..1798f2fb9 --- /dev/null +++ b/docs/doc_examples/48d9697a14dfe131325521f48a7adc84.asciidoc @@ -0,0 +1,23 @@ 
+// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + id: "my-search-template", + params: { + query_string: "My string", + text_fields: [ + { + user_name: "John", + last: false, + }, + { + user_name: "kimchy", + last: true, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/48de51de87a8ad9fd8b8db1ca25b85c1.asciidoc b/docs/doc_examples/48de51de87a8ad9fd8b8db1ca25b85c1.asciidoc new file mode 100644 index 000000000..f367f00be --- /dev/null +++ b/docs/doc_examples/48de51de87a8ad9fd8b8db1ca25b85c1.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.close({ + index: "index", +}); +console.log(response); + +const response1 = await client.indices.putSettings({ + index: "index", + settings: { + index: { + similarity: { + default: { + type: "boolean", + }, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.open({ + index: "index", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/48e142e6c69014e0509d4c9251749d77.asciidoc b/docs/doc_examples/48e142e6c69014e0509d4c9251749d77.asciidoc new file mode 100644 index 000000000..7c4401c7d --- /dev/null +++ b/docs/doc_examples/48e142e6c69014e0509d4c9251749d77.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "openai-embeddings", + inference_config: { + service: "openai", + service_settings: { + api_key: "", + model_id: "text-embedding-3-small", + dimensions: 128, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/49100a4f53c0ba345fadacdc4f2f86e4.asciidoc b/docs/doc_examples/49100a4f53c0ba345fadacdc4f2f86e4.asciidoc new file mode 100644 index 000000000..f72955f38 --- /dev/null +++ b/docs/doc_examples/49100a4f53c0ba345fadacdc4f2f86e4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + q: "kimchy", + filter_path: "took,hits.hits._id,hits.hits._score", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4955bae30f265b9e436f82b015de6d7e.asciidoc b/docs/doc_examples/4955bae30f265b9e436f82b015de6d7e.asciidoc new file mode 100644 index 000000000..400ff437c --- /dev/null +++ b/docs/doc_examples/4955bae30f265b9e436f82b015de6d7e.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + pretty: "true", + query: { + terms: { + color: { + index: "my-index-000001", + id: "2", + path: "color", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/496d35c89dc991a1509f7e8fb93ade45.asciidoc b/docs/doc_examples/496d35c89dc991a1509f7e8fb93ade45.asciidoc new file mode 100644 index 000000000..04eff1a93 --- /dev/null +++ b/docs/doc_examples/496d35c89dc991a1509f7e8fb93ade45.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` 
to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bengali_example", + settings: { + analysis: { + filter: { + bengali_stop: { + type: "stop", + stopwords: "_bengali_", + }, + bengali_keywords: { + type: "keyword_marker", + keywords: ["উদাহরণ"], + }, + bengali_stemmer: { + type: "stemmer", + language: "bengali", + }, + }, + analyzer: { + rebuilt_bengali: { + tokenizer: "standard", + filter: [ + "lowercase", + "decimal_digit", + "bengali_keywords", + "indic_normalization", + "bengali_normalization", + "bengali_stop", + "bengali_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4980d6fcb369692b0b29ddc6767d4324.asciidoc b/docs/doc_examples/4980d6fcb369692b0b29ddc6767d4324.asciidoc new file mode 100644 index 000000000..520c175f2 --- /dev/null +++ b/docs/doc_examples/4980d6fcb369692b0b29ddc6767d4324.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + index: "my-index-000001", + shard: 0, + primary: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc b/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc new file mode 100644 index 000000000..c6c3e829d --- /dev/null +++ b/docs/doc_examples/4982c547be1ad9455ae836990aea92c5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startTrainedModelDeployment({ + model_id: "my_model", + deployment_id: "my_model_for_search", + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 3, + max_number_of_allocations: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4989cc97ce1c8fff634a10d343031bd0.asciidoc b/docs/doc_examples/4989cc97ce1c8fff634a10d343031bd0.asciidoc new file mode 100644 index 000000000..6b446681f --- /dev/null +++ b/docs/doc_examples/4989cc97ce1c8fff634a10d343031bd0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", + h: "state,node", + s: "state", +}); +console.log(response); +---- diff --git a/docs/doc_examples/49a19615ebe2c013b8321152163478ab.asciidoc b/docs/doc_examples/49a19615ebe2c013b8321152163478ab.asciidoc new file mode 100644 index 000000000..0c497534a --- /dev/null +++ b/docs/doc_examples/49a19615ebe2c013b8321152163478ab.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + text: "quick brown fox", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + text: "quick fox", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + match: { + text: "quick brown fox", + }, + }, + script: { + source: "_termStats.termFreq().getAverage()", + }, + }, + }, +}); +console.log(response2); +---- diff --git 
a/docs/doc_examples/49c052a748c943180db78fee8e144239.asciidoc b/docs/doc_examples/49c052a748c943180db78fee8e144239.asciidoc new file mode 100644 index 000000000..f4bcb9fdb --- /dev/null +++ b/docs/doc_examples/49c052a748c943180db78fee8e144239.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearApiKeyCache({ + ids: "yVGMr3QByxdh1MSaicYx,YoiMaqREw0YVpjn40iMg", +}); +console.log(response); +---- diff --git a/docs/doc_examples/49c40b51da2469a6e00fea8fa6fbf56e.asciidoc b/docs/doc_examples/49c40b51da2469a6e00fea8fa6fbf56e.asciidoc new file mode 100644 index 000000000..c1278b3f1 --- /dev/null +++ b/docs/doc_examples/49c40b51da2469a6e00fea8fa6fbf56e.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + pretty: "true", + detailed: "true", + group_by: "parents", + human: "true", + actions: "*data/read/esql", +}); +console.log(response); +---- diff --git a/docs/doc_examples/49cb3f48a0097bfc597c52fa51c6d379.asciidoc b/docs/doc_examples/49cb3f48a0097bfc597c52fa51c6d379.asciidoc new file mode 100644 index 000000000..c7d0ad054 --- /dev/null +++ b/docs/doc_examples/49cb3f48a0097bfc597c52fa51c6d379.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "saml-service-role", + cluster: ["manage_saml", "manage_token"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc b/docs/doc_examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc new file mode 100644 index 000000000..8c7811081 --- /dev/null +++ b/docs/doc_examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "analyze_sample", + normalizer: "my_normalizer", + text: "BaR", +}); +console.log(response); +---- diff --git a/docs/doc_examples/49e8773a34fcbf825de38426cff5509c.asciidoc b/docs/doc_examples/49e8773a34fcbf825de38426cff5509c.asciidoc new file mode 100644 index 000000000..db9a6bca1 --- /dev/null +++ b/docs/doc_examples/49e8773a34fcbf825de38426cff5509c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-knn-index", + profile: true, + knn: { + field: "my-vector", + query_vector: [-5, 9, -12], + k: 3, + num_candidates: 100, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/49f4d2a461536d150e16b1e0a3148678.asciidoc b/docs/doc_examples/49f4d2a461536d150e16b1e0a3148678.asciidoc new file mode 100644 index 000000000..ddfea5d4b --- /dev/null +++ b/docs/doc_examples/49f4d2a461536d150e16b1e0a3148678.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + index: "my-index-000001", + fielddata: "true", +}); +console.log(response); + +const response1 = 
await client.indices.clearCache({ + index: "my-index-000001", + query: "true", +}); +console.log(response1); + +const response2 = await client.indices.clearCache({ + index: "my-index-000001", + request: "true", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/4a1951844bd39f26961bfc965f3432b1.asciidoc b/docs/doc_examples/4a1951844bd39f26961bfc965f3432b1.asciidoc new file mode 100644 index 000000000..bfdaca2d6 --- /dev/null +++ b/docs/doc_examples/4a1951844bd39f26961bfc965f3432b1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + index: "my-index-000001", + docs: [ + { + _id: "1", + }, + { + _id: "2", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4a2080ae55d931eb0643cc3eb91ec524.asciidoc b/docs/doc_examples/4a2080ae55d931eb0643cc3eb91ec524.asciidoc new file mode 100644 index 000000000..8b68c5966 --- /dev/null +++ b/docs/doc_examples/4a2080ae55d931eb0643cc3eb91ec524.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text: { + type: "text", + fields: { + english: { + type: "text", + analyzer: "english", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "quick brown fox", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + text: "quick brown foxes", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + multi_match: { + query: "quick brown foxes", + fields: ["text", "text.english"], + type: "most_fields", + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc b/docs/doc_examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc new file mode 100644 index 000000000..55a3cfae9 --- /dev/null +++ b/docs/doc_examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key", + expiration: "1d", + role_descriptors: { + "role-a": { + cluster: ["all"], + indices: [ + { + names: ["index-a*"], + privileges: ["read"], + }, + ], + }, + "role-b": { + cluster: ["all"], + indices: [ + { + names: ["index-b*"], + privileges: ["all"], + }, + ], + }, + }, + metadata: { + application: "my-application", + environment: { + level: 1, + trusted: true, + tags: ["dev", "staging"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4a72c68b96f44e80463084dfc0449d51.asciidoc b/docs/doc_examples/4a72c68b96f44e80463084dfc0449d51.asciidoc new file mode 100644 index 000000000..002ca35ca --- /dev/null +++ b/docs/doc_examples/4a72c68b96f44e80463084dfc0449d51.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + day_of_week: { + type: "keyword", + script: { 
+ source: + "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))", + }, + }, + }, + aggs: { + day_of_week: { + terms: { + field: "day_of_week", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4a7510a9c0468303658383c00796dad2.asciidoc b/docs/doc_examples/4a7510a9c0468303658383c00796dad2.asciidoc new file mode 100644 index 000000000..f71abdb74 --- /dev/null +++ b/docs/doc_examples/4a7510a9c0468303658383c00796dad2.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.mapping.ignore_malformed": true, + }, + mappings: { + properties: { + number_one: { + type: "byte", + }, + number_two: { + type: "integer", + ignore_malformed: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4aa81a694266fb634904224d14cd9a87.asciidoc b/docs/doc_examples/4aa81a694266fb634904224d14cd9a87.asciidoc new file mode 100644 index 000000000..54bc674f6 --- /dev/null +++ b/docs/doc_examples/4aa81a694266fb634904224d14cd9a87.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_queries2", + query: { + percolate: { + field: "query", + document: { + my_field: "wxyz", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4ae494d1e62231e832fc0436b04e2014.asciidoc b/docs/doc_examples/4ae494d1e62231e832fc0436b04e2014.asciidoc new file mode 100644 index 000000000..2593e595a --- /dev/null +++ b/docs/doc_examples/4ae494d1e62231e832fc0436b04e2014.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + query: { + bool: { + must: { + query_string: { + query: "*:*", + }, + }, + filter: { + term: { + "user.id": "kimchy", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4af15c4f26ddefb9c350e7a246a66a15.asciidoc b/docs/doc_examples/4af15c4f26ddefb9c350e7a246a66a15.asciidoc new file mode 100644 index 000000000..9b0f9cbc8 --- /dev/null +++ b/docs/doc_examples/4af15c4f26ddefb9c350e7a246a66a15.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "node", + filter_path: "aggregations", + aggs: { + ip: { + terms: { + field: "ip", + order: { + "tm.m": "desc", + }, + }, + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + date: "desc", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4b1044259a6d777d87529eae25675005.asciidoc b/docs/doc_examples/4b1044259a6d777d87529eae25675005.asciidoc new file mode 100644 index 000000000..de6386e18 --- /dev/null +++ b/docs/doc_examples/4b1044259a6d777d87529eae25675005.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "set-foo", + description: "sets foo", + 
processors: [ + { + set: { + field: "foo", + value: "bar", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.updateByQuery({ + index: "my-index-000001", + pipeline: "set-foo", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc b/docs/doc_examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc new file mode 100644 index 000000000..d64246fd4 --- /dev/null +++ b/docs/doc_examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + percolate: { + field: "query", + documents: [ + { + message: "bonsai tree", + }, + { + message: "new tree", + }, + { + message: "the office", + }, + { + message: "office tree", + }, + ], + }, + }, + highlight: { + fields: { + message: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4b5110a21676cc0e26e050a4b4552235.asciidoc b/docs/doc_examples/4b5110a21676cc0e26e050a4b4552235.asciidoc new file mode 100644 index 000000000..4366d9a73 --- /dev/null +++ b/docs/doc_examples/4b5110a21676cc0e26e050a4b4552235.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.getSynonym({ + id: "my-synonyms-set", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4b90feb9d5d3dbfce424dac0341320b7.asciidoc b/docs/doc_examples/4b90feb9d5d3dbfce424dac0341320b7.asciidoc deleted file mode 100644 index 74010bbda..000000000 --- a/docs/doc_examples/4b90feb9d5d3dbfce424dac0341320b7.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - query: { - match_all: {} - }, - sort: [ - { - account_number: 'asc' - } - ], - from: 10, - size: 10 - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc new file mode 100644 index 000000000..1e6cd8582 --- /dev/null +++ b/docs/doc_examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.streamCompletion({ + inference_id: "openai-completion", + input: "What is Elastic?", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4ba86373e13e106de044f190343be328.asciidoc b/docs/doc_examples/4ba86373e13e106de044f190343be328.asciidoc new file mode 100644 index 000000000..cc51c372c --- /dev/null +++ b/docs/doc_examples/4ba86373e13e106de044f190343be328.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + countries: { + terms: { + field: "artist.country", + order: [ + { + "rock>playback_stats.avg": "desc", + }, + { + _count: "desc", + }, + ], + }, + aggs: { + rock: { + filter: { + term: { + genre: "rock", + }, + }, + aggs: { + playback_stats: { + stats: { + field: "play_count", + }, + 
}, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4bb4a64cf04e3feb133b0221d29beaa9.asciidoc b/docs/doc_examples/4bb4a64cf04e3feb133b0221d29beaa9.asciidoc new file mode 100644 index 000000000..1030165fb --- /dev/null +++ b/docs/doc_examples/4bb4a64cf04e3feb133b0221d29beaa9.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", + indices: "my-index,logs-my_app-default", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4bb7bcfebca682fb9c9e3e47bfd5ef6f.asciidoc b/docs/doc_examples/4bb7bcfebca682fb9c9e3e47bfd5ef6f.asciidoc new file mode 100644 index 000000000..6ca27ec56 --- /dev/null +++ b/docs/doc_examples/4bb7bcfebca682fb9c9e3e47bfd5ef6f.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + track_total_hits: false, + aggs: { + my_buckets: { + composite: { + sources: [ + { + user_name: { + terms: { + field: "user_name", + }, + }, + }, + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + order: "desc", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4bba59cf745ac7b996bf90308bc26957.asciidoc b/docs/doc_examples/4bba59cf745ac7b996bf90308bc26957.asciidoc new file mode 100644 index 000000000..da7cbe01c --- /dev/null +++ b/docs/doc_examples/4bba59cf745ac7b996bf90308bc26957.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "file-path-test", + query: { + bool: { + must: { + match: { + file_path: "16", + }, + }, + filter: { + term: { + "file_path.tree": "/User/alice", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4bc4db44b8c74610b73f21a421099a13.asciidoc b/docs/doc_examples/4bc4db44b8c74610b73f21a421099a13.asciidoc new file mode 100644 index 000000000..9b8c6cf65 --- /dev/null +++ b/docs/doc_examples/4bc4db44b8c74610b73f21a421099a13.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateToken({ + realm_name: "saml1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4bc744b0f33b322741a8caf6d8d7d765.asciidoc b/docs/doc_examples/4bc744b0f33b322741a8caf6d8d7d765.asciidoc new file mode 100644 index 000000000..2e11299ce --- /dev/null +++ b/docs/doc_examples/4bc744b0f33b322741a8caf6d8d7d765.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + op_type: "create", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4bd42e31ac4a5cf237777f1a0e97aba8.asciidoc b/docs/doc_examples/4bd42e31ac4a5cf237777f1a0e97aba8.asciidoc new file mode 100644 index 
000000000..4bc587d0b --- /dev/null +++ b/docs/doc_examples/4bd42e31ac4a5cf237777f1a0e97aba8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.startTransform({ + transform_id: "suspicious_client_ips", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4be07b34db282044c88d5021c7ea08ee.asciidoc b/docs/doc_examples/4be07b34db282044c88d5021c7ea08ee.asciidoc new file mode 100644 index 000000000..f48a5f5fb --- /dev/null +++ b/docs/doc_examples/4be07b34db282044c88d5021c7ea08ee.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 3, + }, + my_text: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index", + id: 1, + document: { + my_text: "text1", + my_vector: [0.5, 10, 6], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index", + id: 2, + document: { + my_text: "text2", + my_vector: [-0.5, 10, 10], + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/4be20da16d2b58216e8b307218c7bf3a.asciidoc b/docs/doc_examples/4be20da16d2b58216e8b307218c7bf3a.asciidoc new file mode 100644 index 000000000..c1d20522b --- /dev/null +++ b/docs/doc_examples/4be20da16d2b58216e8b307218c7bf3a.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + mappings: { + properties: { + host: { + properties: { + ip: { + type: "ip", + ignore_malformed: true, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc b/docs/doc_examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc new file mode 100644 index 000000000..23c616763 --- /dev/null +++ b/docs/doc_examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text_embedding: { + type: "dense_vector", + dims: 384, + index_options: { + type: "flat", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4bf6bb703a52267379ae2b1e1308cf8b.asciidoc b/docs/doc_examples/4bf6bb703a52267379ae2b1e1308cf8b.asciidoc new file mode 100644 index 000000000..365539ce5 --- /dev/null +++ b/docs/doc_examples/4bf6bb703a52267379ae2b1e1308cf8b.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + filter: { + script: { + script: { + source: "doc['num1'].value > params.param1", + lang: "painless", + params: { + param1: 5, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc b/docs/doc_examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc new file mode 100644 index 000000000..9bacd56b7 --- /dev/null +++ b/docs/doc_examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateDatafeed({ + datafeed_id: "datafeed-test-job", + query: { + term: { + "geo.src": "US", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc b/docs/doc_examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc new file mode 100644 index 000000000..8315e4886 --- /dev/null +++ b/docs/doc_examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getTrainedModels(); +console.log(response); +---- diff --git a/docs/doc_examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc b/docs/doc_examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc new file mode 100644 index 000000000..cccd5f36f --- /dev/null +++ b/docs/doc_examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + query: "this is a test", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc b/docs/doc_examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc new file mode 100644 index 000000000..704b7dd74 --- /dev/null +++ b/docs/doc_examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc b/docs/doc_examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc new file mode 100644 index 000000000..aae92e25f --- /dev/null +++ b/docs/doc_examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.mlDataFrameAnalytics({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4c777b8360ef6c7671ae2e3803c0b0f6.asciidoc b/docs/doc_examples/4c777b8360ef6c7671ae2e3803c0b0f6.asciidoc new file mode 100644 index 000000000..7c466f200 --- /dev/null +++ b/docs/doc_examples/4c777b8360ef6c7671ae2e3803c0b0f6.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + top_tags: { + terms: { + field: "type", + size: 3, + }, + aggs: { + top_sales_hits: { + top_hits: { + sort: [ + { + date: { + order: "desc", + }, + }, + ], + _source: { + includes: ["date", "price"], + }, + size: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/4c77d12039fe2445c9251e33979071ac.asciidoc b/docs/doc_examples/4c77d12039fe2445c9251e33979071ac.asciidoc new file mode 100644 index 000000000..925f2fd98 --- /dev/null +++ b/docs/doc_examples/4c77d12039fe2445c9251e33979071ac.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "log-messages", + filter_path: "aggregations", + aggs: { + categories: { + categorize_text: { + field: "message", + categorization_filters: ["\\w+\\_\\d{3}"], + similarity_threshold: 11, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4c803b088c1915a7b0634d5cafabe606.asciidoc b/docs/doc_examples/4c803b088c1915a7b0634d5cafabe606.asciidoc new file mode 100644 index 000000000..420a3ae92 --- /dev/null +++ b/docs/doc_examples/4c803b088c1915a7b0634d5cafabe606.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "network-traffic", + size: 0, + aggs: { + "ipv4-subnets": { + ip_prefix: { + field: "ipv4", + prefix_length: 24, + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc b/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc new file mode 100644 index 000000000..2c8c7983b --- /dev/null +++ b/docs/doc_examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "my-msmarco-minilm-model", + inference_config: { + service: "elasticsearch", + service_settings: { + num_allocations: 1, + num_threads: 1, + model_id: "msmarco-MiniLM-L12-cos-v5", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc b/docs/doc_examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc new file mode 100644 index 000000000..bff837e2d --- /dev/null +++ b/docs/doc_examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + tag: { + type: "text", + fielddata: true, + fielddata_frequency_filter: { + min: 0.001, + max: 0.1, + min_segment_size: 500, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc b/docs/doc_examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc new file mode 100644 index 000000000..50706a679 --- /dev/null +++ b/docs/doc_examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain(); +console.log(response); +---- diff --git a/docs/doc_examples/4ca5bc2c2b2f64d15b9c16370ae97a39.asciidoc b/docs/doc_examples/4ca5bc2c2b2f64d15b9c16370ae97a39.asciidoc new file mode 100644 index 000000000..4f7a524a1 --- /dev/null +++ 
b/docs/doc_examples/4ca5bc2c2b2f64d15b9c16370ae97a39.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "tiles-in-bounds": { + geohash_grid: { + field: "location", + precision: 8, + bounds: { + top_left: "POINT (4.21875 53.4375)", + bottom_right: "POINT (5.625 52.03125)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4cb44556b8c699f43489b17b42ddd475.asciidoc b/docs/doc_examples/4cb44556b8c699f43489b17b42ddd475.asciidoc new file mode 100644 index 000000000..9d45d4014 --- /dev/null +++ b/docs/doc_examples/4cb44556b8c699f43489b17b42ddd475.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + docs: [ + { + _index: "test", + _id: "1", + stored_fields: ["field1", "field2"], + }, + { + _index: "test", + _id: "2", + stored_fields: ["field3", "field4"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4cd246e5c4c035a2cd4081ae9a3d54e5.asciidoc b/docs/doc_examples/4cd246e5c4c035a2cd4081ae9a3d54e5.asciidoc deleted file mode 100644 index 6ad69bca6..000000000 --- a/docs/doc_examples/4cd246e5c4c035a2cd4081ae9a3d54e5.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: { - source: 'ctx._source.tags.add(params.tag)', - lang: 'painless', - params: { - tag: 'blue' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/4cd40113e0fc90c37976f28d7e4a2327.asciidoc b/docs/doc_examples/4cd40113e0fc90c37976f28d7e4a2327.asciidoc new file mode 100644 index 000000000..4ee26b047 --- /dev/null +++ b/docs/doc_examples/4cd40113e0fc90c37976f28d7e4a2327.asciidoc @@ -0,0 +1,81 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + analysis: { + normalizer: { + my_normalizer: { + type: "custom", + char_filter: [], + filter: ["lowercase", "asciifolding"], + }, + }, + }, + }, + mappings: { + properties: { + foo: { + type: "keyword", + normalizer: "my_normalizer", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "index", + id: 1, + document: { + foo: "BÀR", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "index", + id: 2, + document: { + foo: "bar", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "index", + id: 3, + document: { + foo: "baz", + }, +}); +console.log(response3); + +const response4 = await client.indices.refresh({ + index: "index", +}); +console.log(response4); + +const response5 = await client.search({ + index: "index", + query: { + term: { + foo: "BAR", + }, + }, +}); +console.log(response5); + +const response6 = await client.search({ + index: "index", + query: { + match: { + foo: "BAR", + }, + }, +}); +console.log(response6); +---- diff --git a/docs/doc_examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc 
b/docs/doc_examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc new file mode 100644 index 000000000..f61623475 --- /dev/null +++ b/docs/doc_examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + index: "my-index-000001", + fields: "foo,bar", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc b/docs/doc_examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc new file mode 100644 index 000000000..21cbecbe4 --- /dev/null +++ b/docs/doc_examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc @@ -0,0 +1,64 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.putSynonym({ + id: "my-synonyms-set", + synonyms_set: [ + { + id: "test-1", + synonyms: "hello, hi", + }, + ], +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "test-index", + settings: { + analysis: { + filter: { + synonyms_filter: { + type: "synonym_graph", + synonyms_set: "my-synonyms-set", + updateable: true, + }, + }, + analyzer: { + my_index_analyzer: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase"], + }, + my_search_analyzer: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "synonyms_filter"], + }, + }, + }, + }, + mappings: { + properties: { + title: { + type: "text", + analyzer: "my_index_analyzer", + search_analyzer: "my_search_analyzer", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.synonyms.putSynonym({ + id: "my-synonyms-set", + synonyms_set: [ + { + id: "test-1", + synonyms: "hello, hi, howdy", + }, + ], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/4ce4563e207233c48ffe849728052dca.asciidoc b/docs/doc_examples/4ce4563e207233c48ffe849728052dca.asciidoc new file mode 100644 index 000000000..02cf01978 --- /dev/null +++ b/docs/doc_examples/4ce4563e207233c48ffe849728052dca.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "logs-my_app-default", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4d21725453955582ff12b4a1104aa7b6.asciidoc b/docs/doc_examples/4d21725453955582ff12b4a1104aa7b6.asciidoc new file mode 100644 index 000000000..f220f30fb --- /dev/null +++ b/docs/doc_examples/4d21725453955582ff12b4a1104aa7b6.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateFilter({ + filter_id: "safe_domains", + description: "Updated list of domains", + add_items: ["*.myorg.com"], + remove_items: ["wikipedia.org"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc b/docs/doc_examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc new file mode 100644 index 000000000..6ff988112 --- /dev/null +++ b/docs/doc_examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.rollup.putJob({ + id: "sensor", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "*/30 * * * * ?", + page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "1h", + delay: "7d", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4d46dbb96125b27f46299547de9d8709.asciidoc b/docs/doc_examples/4d46dbb96125b27f46299547de9d8709.asciidoc deleted file mode 100644 index f37274336..000000000 --- a/docs/doc_examples/4d46dbb96125b27f46299547de9d8709.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'test', - body: { - settings: { - 'index.write.wait_for_active_shards': '2' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/4d46e2160784bdf7cce948e9f0d31fc8.asciidoc b/docs/doc_examples/4d46e2160784bdf7cce948e9f0d31fc8.asciidoc new file mode 100644 index 000000000..df784c1c8 --- /dev/null +++ b/docs/doc_examples/4d46e2160784bdf7cce948e9f0d31fc8.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + filter: ["my_custom_word_delimiter_graph_filter"], + }, + }, + filter: { + my_custom_word_delimiter_graph_filter: { + type: "word_delimiter_graph", + type_table: ["- => ALPHA"], + split_on_case_change: false, + split_on_numerics: false, + stem_english_possessive: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4d56b179242fed59e3d6476f817b6055.asciidoc b/docs/doc_examples/4d56b179242fed59e3d6476f817b6055.asciidoc deleted file mode 100644 index 88b67c658..000000000 --- a/docs/doc_examples/4d56b179242fed59e3d6476f817b6055.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'test', - body: { - aliases: { - alias_1: {}, - alias_2: { - filter: { - term: { - user: 'kimchy' - } - }, - routing: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/4d6997c70a1851f9151443c0d38b532e.asciidoc b/docs/doc_examples/4d6997c70a1851f9151443c0d38b532e.asciidoc deleted file mode 100644 index e14cc77bd..000000000 --- a/docs/doc_examples/4d6997c70a1851f9151443c0d38b532e.asciidoc +++ /dev/null @@ -1,55 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.index({ - index: 'my_index', - id: '1', - body: { - message: 'some arrays in this document...', - tags: [ - 'elasticsearch', - 'wow' - ], - lists: [ - { - name: 'prog_list', - description: 'programming list' - }, - { - name: 'cool_list', - description: 'cool stuff list' - } - ] - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'my_index', - id: '2', - body: { - message: 'no arrays in this document...', - tags: 
'elasticsearch', - lists: { - name: 'prog_list', - description: 'programming list' - } - } -}) -console.log(response1) - -const response2 = await client.search({ - index: 'my_index', - body: { - query: { - match: { - tags: 'elasticsearch' - } - } - } -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/4d7c0b52d3c0a084157428624c543c90.asciidoc b/docs/doc_examples/4d7c0b52d3c0a084157428624c543c90.asciidoc new file mode 100644 index 000000000..e54879982 --- /dev/null +++ b/docs/doc_examples/4d7c0b52d3c0a084157428624c543c90.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.info(); +console.log(response); +---- diff --git a/docs/doc_examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc b/docs/doc_examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc new file mode 100644 index 000000000..effb6d63d --- /dev/null +++ b/docs/doc_examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "INDEX_NAME", + conflicts: "proceed", + query: { + bool: { + filter: [ + { + match: { + object_type: "drive_item", + }, + }, + { + exists: { + field: "file", + }, + }, + { + range: { + lastModifiedDateTime: { + lte: "now-180d", + }, + }, + }, + ], + }, + }, + script: { + source: "ctx._source.body = ''", + lang: "painless", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4dc151eebefd484a28aed1a175743364.asciidoc b/docs/doc_examples/4dc151eebefd484a28aed1a175743364.asciidoc new file mode 100644 index 000000000..f7f5c27d2 --- /dev/null +++ b/docs/doc_examples/4dc151eebefd484a28aed1a175743364.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "openai_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "openai_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc b/docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc new file mode 100644 index 000000000..22100f235 --- /dev/null +++ b/docs/doc_examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "elser-model-eis", + inference_config: { + service: "elastic", + service_settings: { + model_name: "elser", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4ded8ad815ac0e83b1c21a6c18fd0763.asciidoc b/docs/doc_examples/4ded8ad815ac0e83b1c21a6c18fd0763.asciidoc new file mode 100644 index 000000000..df0d19f50 --- /dev/null +++ b/docs/doc_examples/4ded8ad815ac0e83b1c21a6c18fd0763.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.startTransform({ + transform_id: "ecommerce-customer-transform", 
+}); +console.log(response); +---- diff --git a/docs/doc_examples/4e1f02928ef243bf07fd425754b7642b.asciidoc b/docs/doc_examples/4e1f02928ef243bf07fd425754b7642b.asciidoc new file mode 100644 index 000000000..c49a47833 --- /dev/null +++ b/docs/doc_examples/4e1f02928ef243bf07fd425754b7642b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.postVotingConfigExclusions({ + node_names: "node_name", +}); +console.log(response); + +const response1 = await client.cluster.postVotingConfigExclusions({ + node_names: "node_name", + timeout: "1m", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/4e2317aa45e87922d07c8ddc67a82d32.asciidoc b/docs/doc_examples/4e2317aa45e87922d07c8ddc67a82d32.asciidoc new file mode 100644 index 000000000..fe78fcfc0 --- /dev/null +++ b/docs/doc_examples/4e2317aa45e87922d07c8ddc67a82d32.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "path_hierarchy", + delimiter: "-", + replacement: "/", + skip: 2, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "one-two-three-four-five", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc b/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc new file mode 100644 index 000000000..e9274320d --- /dev/null +++ b/docs/doc_examples/4e3414fc712b16311f9e433dd366f49d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.delete({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e4608ae4ce93c27bd174a9ea078cab2.asciidoc b/docs/doc_examples/4e4608ae4ce93c27bd174a9ea078cab2.asciidoc new file mode 100644 index 000000000..72581c41b --- /dev/null +++ b/docs/doc_examples/4e4608ae4ce93c27bd174a9ea078cab2.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + match: { + my_text_field: "the query string", + }, + }, + }, + }, + { + standard: { + query: { + sparse_vector: { + field: "my_tokens", + inference_id: "my-elser-endpoint", + query: "the query string", + }, + }, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc b/docs/doc_examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc new file mode 100644 index 000000000..782e49383 --- /dev/null +++ b/docs/doc_examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 10000, 
+ query: { + match: { + "user.id": "elkbee", + }, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + keep_alive: "1m", + }, + sort: [ + { + "@timestamp": { + order: "asc", + format: "strict_date_optional_time_nanos", + }, + }, + { + _shard_doc: "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e5f7a97efdbf517f7a2ed6ef7ff469c.asciidoc b/docs/doc_examples/4e5f7a97efdbf517f7a2ed6ef7ff469c.asciidoc new file mode 100644 index 000000000..99ec5cffe --- /dev/null +++ b/docs/doc_examples/4e5f7a97efdbf517f7a2ed6ef7ff469c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: '{ "query": { "terms": { "tags": {{#toJson}}tags{{/toJson}} }}}', + params: { + tags: ["prod", "es01"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e6b78ac991ed2d5f9a2e7c89f4fc471.asciidoc b/docs/doc_examples/4e6b78ac991ed2d5f9a2e7c89f4fc471.asciidoc new file mode 100644 index 000000000..38d7d2790 --- /dev/null +++ b/docs/doc_examples/4e6b78ac991ed2d5f9a2e7c89f4fc471.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", + pretty: "true", + suggest: { + "song-suggest": { + prefix: "nir", + completion: { + field: "suggest", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e926063a9494b563387617b08c4f232.asciidoc b/docs/doc_examples/4e926063a9494b563387617b08c4f232.asciidoc new file mode 100644 index 000000000..9aca6a082 --- /dev/null +++ b/docs/doc_examples/4e926063a9494b563387617b08c4f232.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "*", + verbose: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc b/docs/doc_examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc new file mode 100644 index 000000000..38c12bda0 --- /dev/null +++ b/docs/doc_examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + fields: "rating,title", + include_unmapped: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4ed946065faa92f9950f04e402676a97.asciidoc b/docs/doc_examples/4ed946065faa92f9950f04e402676a97.asciidoc new file mode 100644 index 000000000..be96e1741 --- /dev/null +++ b/docs/doc_examples/4ed946065faa92f9950f04e402676a97.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.xpack.info({ + human: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc b/docs/doc_examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc new file mode 100644 index 000000000..3bce99ecc --- 
/dev/null +++ b/docs/doc_examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + bool: { + must: [ + { + term: { + tags: "vegetarian", + }, + }, + { + range: { + rating: { + gte: 4.5, + }, + }, + }, + ], + should: [ + { + term: { + category: "Main Course", + }, + }, + { + multi_match: { + query: "curry spicy", + fields: ["title^2", "description"], + }, + }, + { + range: { + date: { + gte: "now-1M/d", + }, + }, + }, + ], + must_not: [ + { + term: { + "category.keyword": "Dessert", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc b/docs/doc_examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc new file mode 100644 index 000000000..a98b7cbf1 --- /dev/null +++ b/docs/doc_examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putPrivileges({ + privileges: { + myapp: { + read: { + actions: ["data:read/*", "action:login"], + metadata: { + description: "Read access to myapp", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4eeded40f30949e359714a5bb6c88612.asciidoc b/docs/doc_examples/4eeded40f30949e359714a5bb6c88612.asciidoc new file mode 100644 index 000000000..435c2eaff --- /dev/null +++ b/docs/doc_examples/4eeded40f30949e359714a5bb6c88612.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "elser-embeddings", + pipeline: "elser_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f08d9e21d9f199acc77abfb83287878.asciidoc b/docs/doc_examples/4f08d9e21d9f199acc77abfb83287878.asciidoc new file mode 100644 index 000000000..d3563e31a --- /dev/null +++ b/docs/doc_examples/4f08d9e21d9f199acc77abfb83287878.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my-app", + params: { + query_string: "my first query", + text_fields: [ + { + name: "title", + boost: 5, + }, + { + name: "description", + boost: 1, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f140d8922efdf3420e41b1cb669a289.asciidoc b/docs/doc_examples/4f140d8922efdf3420e41b1cb669a289.asciidoc new file mode 100644 index 000000000..2f545ce1a --- /dev/null +++ b/docs/doc_examples/4f140d8922efdf3420e41b1cb669a289.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.deleteComponentTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f1e1205154d280db21fbd2754ed5398.asciidoc b/docs/doc_examples/4f1e1205154d280db21fbd2754ed5398.asciidoc new file mode 100644 index 000000000..a944b2044 --- /dev/null 
+++ b/docs/doc_examples/4f1e1205154d280db21fbd2754ed5398.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "stats-index", + mappings: { + properties: { + agg_metric: { + type: "aggregate_metric_double", + metrics: ["min", "max", "sum", "value_count"], + default_metric: "max", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f3366fc26e7ea4de446dfa5cdec9683.asciidoc b/docs/doc_examples/4f3366fc26e7ea4de446dfa5cdec9683.asciidoc new file mode 100644 index 000000000..7fc5cd674 --- /dev/null +++ b/docs/doc_examples/4f3366fc26e7ea4de446dfa5cdec9683.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + gauss: { + "@timestamp": { + origin: "2013-09-17", + scale: "10d", + offset: "5d", + decay: 0.5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc b/docs/doc_examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc new file mode 100644 index 000000000..7effcb6e3 --- /dev/null +++ b/docs/doc_examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + fields: { + comment: { + fragment_size: 150, + number_of_fragments: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f666d710758578e2582850dac3ad144.asciidoc b/docs/doc_examples/4f666d710758578e2582850dac3ad144.asciidoc new file mode 100644 index 000000000..1b9fc2667 --- /dev/null +++ b/docs/doc_examples/4f666d710758578e2582850dac3ad144.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUserProfile({ + uid: "u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", + data: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f6694ef147a73b1163bde3c13779d26.asciidoc b/docs/doc_examples/4f6694ef147a73b1163bde3c13779d26.asciidoc new file mode 100644 index 000000000..0793b4e08 --- /dev/null +++ b/docs/doc_examples/4f6694ef147a73b1163bde3c13779d26.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + human: "true", + filter_path: "nodes.*.indexing_pressure", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4f67b5f5c040f611bd2560a5d38ea6f5.asciidoc b/docs/doc_examples/4f67b5f5c040f611bd2560a5d38ea6f5.asciidoc new file mode 100644 index 000000000..38e0e91b1 --- /dev/null +++ b/docs/doc_examples/4f67b5f5c040f611bd2560a5d38ea6f5.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + missing: "N/A", + }, + }, + }, +}); +console.log(response); 
+---- diff --git a/docs/doc_examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc b/docs/doc_examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc new file mode 100644 index 000000000..4031829ae --- /dev/null +++ b/docs/doc_examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.delete(); +console.log(response); +---- diff --git a/docs/doc_examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc b/docs/doc_examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc new file mode 100644 index 000000000..cec0a6911 --- /dev/null +++ b/docs/doc_examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getDatafeeds({ + datafeed_id: "datafeed-high_sum_total_sales", +}); +console.log(response); +---- diff --git a/docs/doc_examples/4fb0629146ca78b85e823edd405497bb.asciidoc b/docs/doc_examples/4fb0629146ca78b85e823edd405497bb.asciidoc new file mode 100644 index 000000000..92b9fe5f2 --- /dev/null +++ b/docs/doc_examples/4fb0629146ca78b85e823edd405497bb.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "loan_classification", + source: { + index: "loan-applicants", + }, + dest: { + index: "loan-applicants-classified", + }, + analysis: { + classification: { + dependent_variable: "label", + training_percent: 75, + num_top_classes: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc b/docs/doc_examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc new file mode 100644 index 000000000..a1fd27e13 --- /dev/null +++ b/docs/doc_examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + bool: { + should: [ + { + text_expansion: { + "ml.inference.title_expanded.predicted_value": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + boost: 1, + }, + }, + }, + { + text_expansion: { + "ml.inference.description_expanded.predicted_value": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + boost: 1, + }, + }, + }, + { + multi_match: { + query: "How is the weather in Jamaica?", + fields: ["title", "description"], + boost: 4, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc b/docs/doc_examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc new file mode 100644 index 000000000..edee66d52 --- /dev/null +++ b/docs/doc_examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match_phrase: { + message: "number 1", + }, + }, + highlight: { + fields: { + message: { + type: "plain", + fragment_size: 15, + number_of_fragments: 3, + 
fragmenter: "simple", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/50096ee0ca53fe8a88450ebb2a50f285.asciidoc b/docs/doc_examples/50096ee0ca53fe8a88450ebb2a50f285.asciidoc new file mode 100644 index 000000000..ee1e99bc4 --- /dev/null +++ b/docs/doc_examples/50096ee0ca53fe8a88450ebb2a50f285.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "csv", + delimiter: ";", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5024c524a7db0d6bb44c1820007cc5f4.asciidoc b/docs/doc_examples/5024c524a7db0d6bb44c1820007cc5f4.asciidoc new file mode 100644 index 000000000..dfb8e219d --- /dev/null +++ b/docs/doc_examples/5024c524a7db0d6bb44c1820007cc5f4.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "...", + processors: [ + { + grok: { + field: "message", + patterns: [ + "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes:int} %{NUMBER:duration:double}", + ], + }, + }, + ], + }, + docs: [ + { + _source: { + message: "55.3.244.1 GET /index.html 15824 0.043", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5043b83a89091fa00edb341ddf7ba370.asciidoc b/docs/doc_examples/5043b83a89091fa00edb341ddf7ba370.asciidoc deleted file mode 100644 index a94b20183..000000000 --- a/docs/doc_examples/5043b83a89091fa00edb341ddf7ba370.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'this is a testt', - fuzziness: 'AUTO' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/50522d3d5b3d055f712ad737e3d1707a.asciidoc b/docs/doc_examples/50522d3d5b3d055f712ad737e3d1707a.asciidoc new file mode 100644 index 000000000..c0ef20fe2 --- /dev/null +++ b/docs/doc_examples/50522d3d5b3d055f712ad737e3d1707a.asciidoc @@ -0,0 +1,51 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + name: { + type: "text", + fields: { + length: { + type: "token_count", + analyzer: "standard", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + name: "John Smith", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + name: "Rachel Alice Williams", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + term: { + "name.length": 3, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/505a6c21a4cb608d3662fab1a35eb6df.asciidoc b/docs/doc_examples/505a6c21a4cb608d3662fab1a35eb6df.asciidoc new file mode 100644 index 000000000..ed78e8d92 --- /dev/null +++ b/docs/doc_examples/505a6c21a4cb608d3662fab1a35eb6df.asciidoc @@ -0,0 
+1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my_index", + id: 1, + document: { + my_text: "histogram_1", + my_histogram: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + _doc_count: 45, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my_index", + id: 2, + document: { + my_text: "histogram_2", + my_histogram: { + values: [0.1, 0.25, 0.35, 0.4, 0.45, 0.5], + counts: [8, 17, 8, 7, 6, 2], + }, + _doc_count: 62, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/506844befdc5691d835771bcbb1c1a60.asciidoc b/docs/doc_examples/506844befdc5691d835771bcbb1c1a60.asciidoc deleted file mode 100644 index 2c64f07b3..000000000 --- a/docs/doc_examples/506844befdc5691d835771bcbb1c1a60.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - query: { - match_all: {} - }, - sort: [ - { - account_number: 'asc' - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/50764f4ea88079156b0aff2835bcdc45.asciidoc b/docs/doc_examples/50764f4ea88079156b0aff2835bcdc45.asciidoc new file mode 100644 index 000000000..790614490 --- /dev/null +++ b/docs/doc_examples/50764f4ea88079156b0aff2835bcdc45.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "metadata", + pretty: "true", + filter_path: "metadata.stored_scripts", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc b/docs/doc_examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc new file mode 100644 index 000000000..c5ebc07bd --- /dev/null +++ b/docs/doc_examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getScript({ + id: "my-stored-script", +}); +console.log(response); +---- diff --git a/docs/doc_examples/50a9623c153cabe64101efb633e10e6c.asciidoc b/docs/doc_examples/50a9623c153cabe64101efb633e10e6c.asciidoc new file mode 100644 index 000000000..3df88c27c --- /dev/null +++ b/docs/doc_examples/50a9623c153cabe64101efb633e10e6c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.deleteAutoscalingPolicy({ + name: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/50b5c0332949d2154c72b629b5fa6222.asciidoc b/docs/doc_examples/50b5c0332949d2154c72b629b5fa6222.asciidoc new file mode 100644 index 000000000..9b53504df --- /dev/null +++ b/docs/doc_examples/50b5c0332949d2154c72b629b5fa6222.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + refresh: "wait_for", + document: { + user_id: 12345, + }, +}); +console.log(response); + +const response1 = await 
client.index({ + index: "my-index-000001", + refresh: "wait_for", + document: { + user_id: 12346, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/50c2b06ecddb5a4aebd8b78e38af5f1f.asciidoc b/docs/doc_examples/50c2b06ecddb5a4aebd8b78e38af5f1f.asciidoc new file mode 100644 index 000000000..d15963c65 --- /dev/null +++ b/docs/doc_examples/50c2b06ecddb5a4aebd8b78e38af5f1f.asciidoc @@ -0,0 +1,54 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my-lifecycle-policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + }, + }, + warm: { + min_age: "30d", + actions: { + shrink: { + number_of_shards: 1, + }, + forcemerge: { + max_num_segments: 1, + }, + }, + }, + cold: { + min_age: "60d", + actions: { + searchable_snapshot: { + snapshot_repository: "found-snapshots", + }, + }, + }, + frozen: { + min_age: "90d", + actions: { + searchable_snapshot: { + snapshot_repository: "found-snapshots", + }, + }, + }, + delete: { + min_age: "735d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/50c2cea2adbe9523458c2686ab11df54.asciidoc b/docs/doc_examples/50c2cea2adbe9523458c2686ab11df54.asciidoc new file mode 100644 index 000000000..a049dc794 --- /dev/null +++ b/docs/doc_examples/50c2cea2adbe9523458c2686ab11df54.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "text_payloads", + mappings: { + properties: { + text: { + type: "text", + term_vector: "with_positions_payloads", + analyzer: "payload_delimiter", + }, + }, + }, + settings: { + analysis: { + analyzer: { + payload_delimiter: { + tokenizer: "whitespace", + filter: ["delimited_payload"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/50d5c5b7e8ed9a95b8d9a25a32a77425.asciidoc b/docs/doc_examples/50d5c5b7e8ed9a95b8d9a25a32a77425.asciidoc new file mode 100644 index 000000000..5c66601f8 --- /dev/null +++ b/docs/doc_examples/50d5c5b7e8ed9a95b8d9a25a32a77425.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["unique"], + text: "the quick fox jumps the lazy fox", +}); +console.log(response); +---- diff --git a/docs/doc_examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc b/docs/doc_examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc new file mode 100644 index 000000000..4cee63e13 --- /dev/null +++ b/docs/doc_examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "idx", + id: 1, + document: { + "foo.bar.baz": 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/50dc35d3d8705bd62aed20a15209476c.asciidoc b/docs/doc_examples/50dc35d3d8705bd62aed20a15209476c.asciidoc new file mode 100644 index 000000000..7d0323216 --- /dev/null +++ b/docs/doc_examples/50dc35d3d8705bd62aed20a15209476c.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO 
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping9", + rules: { + field: { + "realm.name": "cloud-saml", + }, + }, + role_templates: [ + { + template: { + source: "saml_user", + }, + }, + { + template: { + source: "_user_{{username}}", + }, + }, + ], + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/50f922e9f002d8ac570953be59414b7b.asciidoc b/docs/doc_examples/50f922e9f002d8ac570953be59414b7b.asciidoc new file mode 100644 index 000000000..8f508f633 --- /dev/null +++ b/docs/doc_examples/50f922e9f002d8ac570953be59414b7b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + combined_fields: { + query: "database systems", + fields: ["title", "abstract"], + operator: "and", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/511e5bb8ab881171b7e8629095e30b85.asciidoc b/docs/doc_examples/511e5bb8ab881171b7e8629095e30b85.asciidoc new file mode 100644 index 000000000..0871eea9c --- /dev/null +++ b/docs/doc_examples/511e5bb8ab881171b7e8629095e30b85.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "datastream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/515e1104d136082e826d1b32af011759.asciidoc b/docs/doc_examples/515e1104d136082e826d1b32af011759.asciidoc new file mode 100644 index 000000000..af042e8f2 --- /dev/null +++ b/docs/doc_examples/515e1104d136082e826d1b32af011759.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "products", + id: 0, + refresh: "true", + document: { + name: "LED TV", + resellers: [ + { + reseller: "companyA", + price: 350, + }, + { + reseller: "companyB", + price: 500, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc b/docs/doc_examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc new file mode 100644 index 000000000..115cd3757 --- /dev/null +++ b/docs/doc_examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.clearCursor({ + cursor: + "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/518fcf1dc1edd7dba0864accf71b49f4.asciidoc b/docs/doc_examples/518fcf1dc1edd7dba0864accf71b49f4.asciidoc new file mode 100644 index 000000000..fc8e655af --- /dev/null +++ b/docs/doc_examples/518fcf1dc1edd7dba0864accf71b49f4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + preference: "_local", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/5195a88194f7a139c635a84398d76205.asciidoc b/docs/doc_examples/5195a88194f7a139c635a84398d76205.asciidoc new file mode 100644 index 000000000..567ff6501 --- /dev/null +++ b/docs/doc_examples/5195a88194f7a139c635a84398d76205.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot", +}); +console.log(response); +---- diff --git a/docs/doc_examples/519e46350316a33162740e5d7968aa2c.asciidoc b/docs/doc_examples/519e46350316a33162740e5d7968aa2c.asciidoc new file mode 100644 index 000000000..3c92986f6 --- /dev/null +++ b/docs/doc_examples/519e46350316a33162740e5d7968aa2c.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + knn: { + field: "image-vector", + query_vector: [-5, 9, -12], + k: 10, + num_candidates: 100, + rescore_vector: { + oversample: 2, + }, + }, + fields: ["title", "file-type"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/51b40610ae05730b4c6afd25647d7ae0.asciidoc b/docs/doc_examples/51b40610ae05730b4c6afd25647d7ae0.asciidoc new file mode 100644 index 000000000..e17edea6d --- /dev/null +++ b/docs/doc_examples/51b40610ae05730b4c6afd25647d7ae0.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + date: "2015-10-01T05:30:00Z", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + date: "2015-10-01T06:30:00Z", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + by_day: { + date_histogram: { + field: "date", + calendar_interval: "day", + offset: "+6h", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/51b44224feee6e2e5974824334474c77.asciidoc b/docs/doc_examples/51b44224feee6e2e5974824334474c77.asciidoc new file mode 100644 index 000000000..ac951d7c0 --- /dev/null +++ b/docs/doc_examples/51b44224feee6e2e5974824334474c77.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_s3_repository", + repository: { + type: "s3", + settings: { + client: "my-client", + bucket: "my-bucket", + endpoint: "my.s3.endpoint", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/51f1a0930362594b231a5bcc17673768.asciidoc b/docs/doc_examples/51f1a0930362594b231a5bcc17673768.asciidoc new file mode 100644 index 000000000..126095322 --- /dev/null +++ b/docs/doc_examples/51f1a0930362594b231a5bcc17673768.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.modifyDataStream({ + actions: [ + { + remove_backing_index: { + data_stream: "my-logs", + index: ".ds-my-logs-2099.01.01-000001", + }, + }, 
+ { + add_backing_index: { + data_stream: "my-logs", + index: "index-to-add", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/51f6cb682424e110f289af79c106f4c7.asciidoc b/docs/doc_examples/51f6cb682424e110f289af79c106f4c7.asciidoc new file mode 100644 index 000000000..236694db7 --- /dev/null +++ b/docs/doc_examples/51f6cb682424e110f289af79c106f4c7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.max_shards_per_node.frozen": 3200, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/527324766814561b75aaee853ede49a7.asciidoc b/docs/doc_examples/527324766814561b75aaee853ede49a7.asciidoc deleted file mode 100644 index e3fc030b2..000000000 --- a/docs/doc_examples/527324766814561b75aaee853ede49a7.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - tags: { - terms: { - field: 'tags', - min_doc_count: 10 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5275842787967b6db876025f4a1c6942.asciidoc b/docs/doc_examples/5275842787967b6db876025f4a1c6942.asciidoc new file mode 100644 index 000000000..d2ccfd01b --- /dev/null +++ b/docs/doc_examples/5275842787967b6db876025f4a1c6942.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + suggest: { + text: "tring out Elasticsearch", + "my-suggest-1": { + term: { + field: "message", + }, + }, + "my-suggest-2": { + term: { + field: "user", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5276a831513623e43ed567eb52b6dba9.asciidoc b/docs/doc_examples/5276a831513623e43ed567eb52b6dba9.asciidoc new file mode 100644 index 000000000..8b4199dbd --- /dev/null +++ b/docs/doc_examples/5276a831513623e43ed567eb52b6dba9.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + routing: "my-routing-value", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/528e5f1c345c3769248cc6889e8cf552.asciidoc b/docs/doc_examples/528e5f1c345c3769248cc6889e8cf552.asciidoc new file mode 100644 index 000000000..7b49f32ca --- /dev/null +++ b/docs/doc_examples/528e5f1c345c3769248cc6889e8cf552.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "index", + properties: { + title: { + type: "text", + similarity: "my_similarity", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/529671ffaf7cc75fe83a81d729788be4.asciidoc b/docs/doc_examples/529671ffaf7cc75fe83a81d729788be4.asciidoc new file mode 100644 index 000000000..c493ead5a --- /dev/null +++ b/docs/doc_examples/529671ffaf7cc75fe83a81d729788be4.asciidoc @@ -0,0 
+1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: ".elastic-connectors", + id: "connector_id", + doc: { + configuration: { + field_a: { + type: "str", + value: "", + }, + field_b: { + type: "bool", + value: false, + }, + field_c: { + type: "int", + value: 1, + }, + field_d: { + type: "list", + value: "a,b", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/529b975b7cedaac58dce9821956adc37.asciidoc b/docs/doc_examples/529b975b7cedaac58dce9821956adc37.asciidoc new file mode 100644 index 000000000..91d0a569e --- /dev/null +++ b/docs/doc_examples/529b975b7cedaac58dce9821956adc37.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "MultiPolygon", + coordinates: [ + [ + [ + [102, 2], + [103, 2], + [103, 3], + [102, 3], + [102, 2], + ], + ], + [ + [ + [100, 0], + [101, 0], + [101, 1], + [100, 1], + [100, 0], + ], + [ + [100.2, 0.2], + [100.8, 0.2], + [100.8, 0.8], + [100.2, 0.8], + [100.2, 0.2], + ], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52a2d119addb15366a935115518335fd.asciidoc b/docs/doc_examples/52a2d119addb15366a935115518335fd.asciidoc new file mode 100644 index 000000000..45074d6c2 --- /dev/null +++ b/docs/doc_examples/52a2d119addb15366a935115518335fd.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my_source_index", + settings: { + settings: { + "index.number_of_replicas": 0, + "index.routing.allocation.require._name": "shrink_node_name", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52b2bfbdd78f8283b6f4891c48013237.asciidoc b/docs/doc_examples/52b2bfbdd78f8283b6f4891c48013237.asciidoc deleted file mode 100644 index a87ac5363..000000000 --- a/docs/doc_examples/52b2bfbdd78f8283b6f4891c48013237.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - max_docs: 1, - source: { - index: 'twitter' - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc b/docs/doc_examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc new file mode 100644 index 000000000..7038db1a4 --- /dev/null +++ b/docs/doc_examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + human: "true", + filter_path: "nodes.*.name,nodes.*.indices.indexing", +}); +console.log(response); +---- diff --git a/docs/doc_examples/52bc577a0d0cd42b46f33e0ef5124df8.asciidoc b/docs/doc_examples/52bc577a0d0cd42b46f33e0ef5124df8.asciidoc new file mode 100644 index 000000000..72faf47d1 --- /dev/null +++ b/docs/doc_examples/52bc577a0d0cd42b46f33e0ef5124df8.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use 
`node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-search-template", + script: { + lang: "mustache", + source: { + query: { + match: { + message: "{{query_string}}", + }, + }, + from: "{{from}}", + size: "{{size}}", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52be795b68e6ef3f396f35fea52d0481.asciidoc b/docs/doc_examples/52be795b68e6ef3f396f35fea52d0481.asciidoc new file mode 100644 index 000000000..de83fe2e1 --- /dev/null +++ b/docs/doc_examples/52be795b68e6ef3f396f35fea52d0481.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc b/docs/doc_examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc new file mode 100644 index 000000000..b1ed824fb --- /dev/null +++ b/docs/doc_examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-image-index", + size: 10, + query: { + bool: { + must: { + knn: { + field: "image-vector", + query_vector: [-5, 9, -12], + k: 3, + }, + }, + filter: { + term: { + "file-type": "png", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52c7e4172a446c394210a07c464c57d2.asciidoc b/docs/doc_examples/52c7e4172a446c394210a07c464c57d2.asciidoc index c15b0db56..0b1f25a10 100644 --- a/docs/doc_examples/52c7e4172a446c394210a07c464c57d2.asciidoc +++ b/docs/doc_examples/52c7e4172a446c394210a07c464c57d2.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.deleteByQueryRethrottle({ - task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619', - requests_per_second: '-1' -}) -console.log(response) + task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", + requests_per_second: "-1", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/52cdb5526ce69d0223d1dd198308bfea.asciidoc b/docs/doc_examples/52cdb5526ce69d0223d1dd198308bfea.asciidoc new file mode 100644 index 000000000..a15862221 --- /dev/null +++ b/docs/doc_examples/52cdb5526ce69d0223d1dd198308bfea.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic: false, + properties: { + user: { + properties: { + name: { + type: "text", + }, + social_networks: { + dynamic: true, + properties: {}, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52f1c1689ab35353858cdeaab7597546.asciidoc b/docs/doc_examples/52f1c1689ab35353858cdeaab7597546.asciidoc new file mode 100644 index 000000000..0a3fbbf4f --- /dev/null +++ b/docs/doc_examples/52f1c1689ab35353858cdeaab7597546.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-data-stream", + pipeline: "my-pipeline", + document: { + 
message: + '89.160.20.128 - - [05/May/2099:16:21:15 +0000] "GET /favicon.ico HTTP/1.1" 200 3638 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"', + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc b/docs/doc_examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc new file mode 100644 index 000000000..a6490dd78 --- /dev/null +++ b/docs/doc_examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "alibabacloud_ai_search_sparse", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + api_key: "", + service_id: "ops-text-sparse-embedding-001", + host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + workspace: "default", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/52fd112e970882c4d7cc4b0cca8e2c6f.asciidoc b/docs/doc_examples/52fd112e970882c4d7cc4b0cca8e2c6f.asciidoc new file mode 100644 index 000000000..3044bb476 --- /dev/null +++ b/docs/doc_examples/52fd112e970882c4d7cc4b0cca8e2c6f.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + number_of_bytes: { + type: "integer", + }, + time_in_seconds: { + type: "float", + }, + price: { + type: "scaled_float", + scaling_factor: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5302f4f2bcc0f400ff71c791e6f68d7b.asciidoc b/docs/doc_examples/5302f4f2bcc0f400ff71c791e6f68d7b.asciidoc new file mode 100644 index 000000000..96d2153be --- /dev/null +++ b/docs/doc_examples/5302f4f2bcc0f400ff71c791e6f68d7b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "keyword_marker", + keywords: ["jumping"], + }, + "stemmer", + ], + text: "fox running and jumping", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc b/docs/doc_examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc new file mode 100644 index 000000000..fbb8023e0 --- /dev/null +++ b/docs/doc_examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.shutdown.putNode({ + node_id: "USpTGYaBSIKbgSUJR2Z9lg", + type: "restart", + reason: "Demonstrating how the node shutdown API works", + allocation_delay: "20m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc b/docs/doc_examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc new file mode 100644 index 000000000..019795cfe --- /dev/null +++ b/docs/doc_examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, 
js] +---- +const response = await client.indices.create({ + index: "index_long", + mappings: { + properties: { + field: { + type: "long", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/532f371934b61fb4992d37bedcc085de.asciidoc b/docs/doc_examples/532f371934b61fb4992d37bedcc085de.asciidoc new file mode 100644 index 000000000..9cd381107 --- /dev/null +++ b/docs/doc_examples/532f371934b61fb4992d37bedcc085de.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.shutdown.putNode({ + node_id: "USpTGYaBSIKbgSUJR2Z9lg", + type: "restart", + reason: "Demonstrating how the node shutdown API works", + allocation_delay: "10m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5330191ec9f11281ebf6867bf11c58ae.asciidoc b/docs/doc_examples/5330191ec9f11281ebf6867bf11c58ae.asciidoc new file mode 100644 index 000000000..43089e612 --- /dev/null +++ b/docs/doc_examples/5330191ec9f11281ebf6867bf11c58ae.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + routing: 1, + query: { + range: { + age: { + gte: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5332c4cca5fbb45cc700dcd34f37bc38.asciidoc b/docs/doc_examples/5332c4cca5fbb45cc700dcd34f37bc38.asciidoc new file mode 100644 index 000000000..04bd5892f --- /dev/null +++ b/docs/doc_examples/5332c4cca5fbb45cc700dcd34f37bc38.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + english: "Some English text", + count: 5, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc b/docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc new file mode 100644 index 000000000..cfeed0dff --- /dev/null +++ b/docs/doc_examples/537bce129338d9227bccb6a0283dab45.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "migrate.data_stream_reindex_max_request_per_second": 10000, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/53aa8b21e2b1c4d48960343711296704.asciidoc b/docs/doc_examples/53aa8b21e2b1c4d48960343711296704.asciidoc new file mode 100644 index 000000000..17dd79085 --- /dev/null +++ b/docs/doc_examples/53aa8b21e2b1c4d48960343711296704.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + regexp: { + "my_field.keyword": "a\\\\.*", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/53b908c3432118c5a6e460f74d32006b.asciidoc b/docs/doc_examples/53b908c3432118c5a6e460f74d32006b.asciidoc index 519bfd36b..6ec1db62e 100644 --- a/docs/doc_examples/53b908c3432118c5a6e460f74d32006b.asciidoc +++ b/docs/doc_examples/53b908c3432118c5a6e460f74d32006b.asciidoc @@ -4,18 +4,12 @@ 
[source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'this is a test', - fields: [ - 'subject', - 'message' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "this is a test", + fields: ["subject", "message"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/53bb7f0e3429861aadb8dd3d588085cd.asciidoc b/docs/doc_examples/53bb7f0e3429861aadb8dd3d588085cd.asciidoc new file mode 100644 index 000000000..061f2096c --- /dev/null +++ b/docs/doc_examples/53bb7f0e3429861aadb8dd3d588085cd.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-data-stream", + seq_no_primary_term: true, + query: { + match: { + "user.id": "yWIumJd7", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/53c6256295111524d5ff2885bdcb99a9.asciidoc b/docs/doc_examples/53c6256295111524d5ff2885bdcb99a9.asciidoc new file mode 100644 index 000000000..9822c80fa --- /dev/null +++ b/docs/doc_examples/53c6256295111524d5ff2885bdcb99a9.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.getTransform({ + transform_id: "_stats", + from: 5, + size: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/53d938c754f36a912fcbe6473abb463f.asciidoc b/docs/doc_examples/53d938c754f36a912fcbe6473abb463f.asciidoc deleted file mode 100644 index 313a18ae4..000000000 --- a/docs/doc_examples/53d938c754f36a912fcbe6473abb463f.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - index: 'users' - }, - dest: { - index: 'new_users' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc b/docs/doc_examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc new file mode 100644 index 000000000..58eee5923 --- /dev/null +++ b/docs/doc_examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + id: "query_helper_pipeline", + docs: [ + { + _source: { + content: + "artificial intelligence in medicine articles published in the last 12 months", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/53e4ac5a4009fd21024f4b31e54aa83f.asciidoc b/docs/doc_examples/53e4ac5a4009fd21024f4b31e54aa83f.asciidoc new file mode 100644 index 000000000..c8cc6b7df --- /dev/null +++ b/docs/doc_examples/53e4ac5a4009fd21024f4b31e54aa83f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "facilitator", + password: "", + roles: ["facilitator-role"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/54059961f05904368ced52c894a50e23.asciidoc b/docs/doc_examples/54059961f05904368ced52c894a50e23.asciidoc new file mode 100644 index 000000000..c6040d699 --- 
/dev/null +++ b/docs/doc_examples/54059961f05904368ced52c894a50e23.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_moving_max: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.max(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/54092c8c646133f5dbbc047990dd458d.asciidoc b/docs/doc_examples/54092c8c646133f5dbbc047990dd458d.asciidoc deleted file mode 100644 index 420836eac..000000000 --- a/docs/doc_examples/54092c8c646133f5dbbc047990dd458d.asciidoc +++ /dev/null @@ -1,36 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'drivers', - body: { - mappings: { - properties: { - driver: { - type: 'nested', - properties: { - last_name: { - type: 'text' - }, - vehicle: { - type: 'nested', - properties: { - make: { - type: 'text' - }, - model: { - type: 'text' - } - } - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/540aefc39303c925a4efff71ebe2f002.asciidoc b/docs/doc_examples/540aefc39303c925a4efff71ebe2f002.asciidoc new file mode 100644 index 000000000..3d74dc98c --- /dev/null +++ b/docs/doc_examples/540aefc39303c925a4efff71ebe2f002.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + significant_terms: { + field: "tag", + min_doc_count: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5433bb83628cc91d81fbe53c533b2a09.asciidoc b/docs/doc_examples/5433bb83628cc91d81fbe53c533b2a09.asciidoc new file mode 100644 index 000000000..9b1d74392 --- /dev/null +++ b/docs/doc_examples/5433bb83628cc91d81fbe53c533b2a09.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "asciifold_example", + settings: { + analysis: { + analyzer: { + standard_asciifolding: { + tokenizer: "standard", + filter: ["asciifolding"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc b/docs/doc_examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc new file mode 100644 index 000000000..1dfbbd2b4 --- /dev/null +++ b/docs/doc_examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "indices", +}); +console.log(response); + +const response1 = await client.nodes.stats({ + metric: "os,process", +}); +console.log(response1); + +const response2 = await client.nodes.stats({ + node_id: "10.0.0.1", + metric: "process", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc 
b/docs/doc_examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc new file mode 100644 index 000000000..376f11be8 --- /dev/null +++ b/docs/doc_examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "cohere_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "cohere_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/548b85bd9e6e7d33e36133953869449b.asciidoc b/docs/doc_examples/548b85bd9e6e7d33e36133953869449b.asciidoc new file mode 100644 index 000000000..719ae08ac --- /dev/null +++ b/docs/doc_examples/548b85bd9e6e7d33e36133953869449b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.monitoring.collection.enabled": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/54a215d242ab65123b09e9dfb71bcbbf.asciidoc b/docs/doc_examples/54a215d242ab65123b09e9dfb71bcbbf.asciidoc new file mode 100644 index 000000000..3c2251e1a --- /dev/null +++ b/docs/doc_examples/54a215d242ab65123b09e9dfb71bcbbf.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + terms: { + field: "genre", + order: { + _key: "asc", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc b/docs/doc_examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc new file mode 100644 index 000000000..3cd177602 --- /dev/null +++ b/docs/doc_examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: ".elastic-connectors-sync-jobs-v1", + properties: { + job_type: { + type: "keyword", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/54c12d5099d7b715c15f5bbf65b386a1.asciidoc b/docs/doc_examples/54c12d5099d7b715c15f5bbf65b386a1.asciidoc new file mode 100644 index 000000000..ea16a6a2c --- /dev/null +++ b/docs/doc_examples/54c12d5099d7b715c15f5bbf65b386a1.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "alibabacloud-ai-search-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1024, + element_type: "float", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/55085e6a2891040b6ac696561d0787c8.asciidoc b/docs/doc_examples/55085e6a2891040b6ac696561d0787c8.asciidoc new file mode 100644 index 000000000..eebda22c2 --- /dev/null +++ b/docs/doc_examples/55085e6a2891040b6ac696561d0787c8.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000002", + mappings: { + properties: { + attributes: { + type: "passthrough", + priority: 10, + properties: { + id: { + type: "keyword", + }, + }, + }, + "resource.attributes": { + type: "passthrough", + priority: 20, + properties: { + id: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/55096381f811388fafd8e244dd2402c8.asciidoc b/docs/doc_examples/55096381f811388fafd8e244dd2402c8.asciidoc new file mode 100644 index 000000000..044e53645 --- /dev/null +++ b/docs/doc_examples/55096381f811388fafd8e244dd2402c8.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-alias", + settings: { + "index.number_of_shards": 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/551467688d8c701315d0a371850a4056.asciidoc b/docs/doc_examples/551467688d8c701315d0a371850a4056.asciidoc new file mode 100644 index 000000000..cc9792da6 --- /dev/null +++ b/docs/doc_examples/551467688d8c701315d0a371850a4056.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "hugging-face-embeddings", + pipeline: "hugging_face_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/551799fef2f86e393db83a967e4a30d1.asciidoc b/docs/doc_examples/551799fef2f86e393db83a967e4a30d1.asciidoc new file mode 100644 index 000000000..ee97fb114 --- /dev/null +++ b/docs/doc_examples/551799fef2f86e393db83a967e4a30d1.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + agg_metric: { + type: "aggregate_metric_double", + metrics: ["min", "max", "sum", "value_count"], + default_metric: "max", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + agg_metric: { + min: -302.5, + max: 702.3, + sum: 200, + value_count: 25, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/553904c175a76d5ba83bc5d46fff7373.asciidoc b/docs/doc_examples/553904c175a76d5ba83bc5d46fff7373.asciidoc new file mode 100644 index 000000000..e4289442f --- /dev/null +++ b/docs/doc_examples/553904c175a76d5ba83bc5d46fff7373.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlLogout({ + token: "46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3", + refresh_token: "mJdXLtmvTUSpoLwMvdBt_w", +}); +console.log(response); +---- diff --git a/docs/doc_examples/553d79817bb1333970e99507c37a159a.asciidoc b/docs/doc_examples/553d79817bb1333970e99507c37a159a.asciidoc new file mode 100644 index 000000000..1097587d1 --- /dev/null +++ b/docs/doc_examples/553d79817bb1333970e99507c37a159a.asciidoc @@ -0,0 +1,19 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + index: { + similarity: { + default: { + type: "boolean", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5553cf7a02c22f616cd994747f2dd5a5.asciidoc b/docs/doc_examples/5553cf7a02c22f616cd994747f2dd5a5.asciidoc new file mode 100644 index 000000000..a3365ef4b --- /dev/null +++ b/docs/doc_examples/5553cf7a02c22f616cd994747f2dd5a5.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + bool: { + must: [ + { + match: { + "user.first": "Alice", + }, + }, + { + match: { + "user.last": "Smith", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5566cff431570f522e1fc5475b2ed875.asciidoc b/docs/doc_examples/5566cff431570f522e1fc5475b2ed875.asciidoc new file mode 100644 index 000000000..ac151dd64 --- /dev/null +++ b/docs/doc_examples/5566cff431570f522e1fc5475b2ed875.asciidoc @@ -0,0 +1,71 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + settings: { + index: { + number_of_shards: 1, + analysis: { + analyzer: { + trigram: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "shingle"], + }, + reverse: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "reverse"], + }, + }, + filter: { + shingle: { + type: "shingle", + min_shingle_size: 2, + max_shingle_size: 3, + }, + }, + }, + }, + }, + mappings: { + properties: { + title: { + type: "text", + fields: { + trigram: { + type: "text", + analyzer: "trigram", + }, + reverse: { + type: "text", + analyzer: "reverse", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + refresh: "true", + document: { + title: "noble warriors", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "test", + refresh: "true", + document: { + title: "nobel prize", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/55838e0b21c4f4da2dc8aaec045a6d5f.asciidoc b/docs/doc_examples/55838e0b21c4f4da2dc8aaec045a6d5f.asciidoc new file mode 100644 index 000000000..35b5bc77a --- /dev/null +++ b/docs/doc_examples/55838e0b21c4f4da2dc8aaec045a6d5f.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + runtime_mappings: { + "load_time.seconds": { + type: "long", + script: { + source: "emit(doc['load_time'].value / params.timeUnit)", + params: { + timeUnit: 1000, + }, + }, + }, + }, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time.seconds", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc b/docs/doc_examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc new file mode 100644 index 000000000..1e6422794 --- /dev/null +++ b/docs/doc_examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, 
DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-dfs-index", + settings: { + number_of_shards: 2, + number_of_replicas: 1, + }, + mappings: { + properties: { + "my-keyword": { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-dfs-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "my-keyword": "a", + }, + { + index: { + _id: "2", + }, + }, + { + "my-keyword": "b", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5597eeb8f43b5d47bd07f27122c24194.asciidoc b/docs/doc_examples/5597eeb8f43b5d47bd07f27122c24194.asciidoc new file mode 100644 index 000000000..6afde064f --- /dev/null +++ b/docs/doc_examples/5597eeb8f43b5d47bd07f27122c24194.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: + "my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001", + ccs_minimize_roundtrips: "false", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/55d349ccb0efd5e1c06c6dd383a593cf.asciidoc b/docs/doc_examples/55d349ccb0efd5e1c06c6dd383a593cf.asciidoc new file mode 100644 index 000000000..5e71a8116 --- /dev/null +++ b/docs/doc_examples/55d349ccb0efd5e1c06c6dd383a593cf.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "my-index-000001,cluster*:my-index-*,cluster_three:-my-index-000001", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/55e8ddf643726dec51531ada0bec7143.asciidoc b/docs/doc_examples/55e8ddf643726dec51531ada0bec7143.asciidoc new file mode 100644 index 000000000..7e7984e2e --- /dev/null +++ b/docs/doc_examples/55e8ddf643726dec51531ada0bec7143.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getStats(); +console.log(response); +---- diff --git a/docs/doc_examples/55f0fec6342f677af74de2124b801aa2.asciidoc b/docs/doc_examples/55f0fec6342f677af74de2124b801aa2.asciidoc new file mode 100644 index 000000000..1f7148276 --- /dev/null +++ b/docs/doc_examples/55f0fec6342f677af74de2124b801aa2.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "byte-image-index", + knn: { + field: "byte-image-vector", + query_vector: [-5, 9], + k: 10, + num_candidates: 100, + }, + fields: ["title"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc b/docs/doc_examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc new file mode 100644 index 000000000..baa6dae78 --- /dev/null +++ b/docs/doc_examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc @@ -0,0 +1,8 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.authenticate(); +console.log(response); +---- diff --git a/docs/doc_examples/5619103306878d58a058bce87c5bd82b.asciidoc b/docs/doc_examples/5619103306878d58a058bce87c5bd82b.asciidoc new file mode 100644 index 000000000..5f2179a7b --- /dev/null +++ b/docs/doc_examples/5619103306878d58a058bce87c5bd82b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.recovery({ + human: "true", + detailed: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5632c3b947062d3a5fc0e4f3413b3308.asciidoc b/docs/doc_examples/5632c3b947062d3a5fc0e4f3413b3308.asciidoc new file mode 100644 index 000000000..bf8e62960 --- /dev/null +++ b/docs/doc_examples/5632c3b947062d3a5fc0e4f3413b3308.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_fs_backup", + repository: { + type: "fs", + settings: { + location: "/mount/backups/my_fs_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/563dfbf421422c837ee6929ae2ede876.asciidoc b/docs/doc_examples/563dfbf421422c837ee6929ae2ede876.asciidoc new file mode 100644 index 000000000..f60a1d8d5 --- /dev/null +++ b/docs/doc_examples/563dfbf421422c837ee6929ae2ede876.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.migrateToDataStream({ + name: "my-logs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/565386eee0951865a684e41fab53b40c.asciidoc b/docs/doc_examples/565386eee0951865a684e41fab53b40c.asciidoc new file mode 100644 index 000000000..149cff486 --- /dev/null +++ b/docs/doc_examples/565386eee0951865a684e41fab53b40c.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", + inference_config: { + service: "elser", + service_settings: { + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 3, + max_number_of_allocations: 10, + }, + num_threads: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc b/docs/doc_examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc new file mode 100644 index 000000000..91ca422d8 --- /dev/null +++ b/docs/doc_examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_security/cross_cluster/api_key", + body: { + name: "my-cross-cluster-api-key", + expiration: "1d", + access: { + search: [ + { + names: ["logs*"], + }, + ], + replication: [ + { + names: ["archive*"], + }, + ], + }, + metadata: { + description: "phase one", + environment: { + level: 1, + trusted: true, + tags: ["dev", 
"staging"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/565908b03edff1d6e6e7cdfb92177faf.asciidoc b/docs/doc_examples/565908b03edff1d6e6e7cdfb92177faf.asciidoc new file mode 100644 index 000000000..6815ee37a --- /dev/null +++ b/docs/doc_examples/565908b03edff1d6e6e7cdfb92177faf.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + runtime_mappings: { + "grade.weighted": { + type: "double", + script: + "\n emit(doc['grade'].value * doc['weight'].value)\n ", + }, + }, + aggs: { + grades_stats: { + stats: { + field: "grade.weighted", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/568979150ce18739f8d3ea859355aaa3.asciidoc b/docs/doc_examples/568979150ce18739f8d3ea859355aaa3.asciidoc new file mode 100644 index 000000000..c421d5bd2 --- /dev/null +++ b/docs/doc_examples/568979150ce18739f8d3ea859355aaa3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUser({ + username: "jacknich", + with_profile_uid: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/569f10fee671632017c722fd983009d4.asciidoc b/docs/doc_examples/569f10fee671632017c722fd983009d4.asciidoc new file mode 100644 index 000000000..cc388a8c9 --- /dev/null +++ b/docs/doc_examples/569f10fee671632017c722fd983009d4.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + shop: { + terms: { + field: "shop", + }, + }, + }, + { + product: { + terms: { + field: "product", + }, + }, + }, + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc b/docs/doc_examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc new file mode 100644 index 000000000..256dc7ce2 --- /dev/null +++ b/docs/doc_examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + email: { + type: "keyword", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/56a903530990313b753b1be33578997a.asciidoc b/docs/doc_examples/56a903530990313b753b1be33578997a.asciidoc new file mode 100644 index 000000000..789290384 --- /dev/null +++ b/docs/doc_examples/56a903530990313b753b1be33578997a.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + dis_max: { + queries: [ + { + multi_match: { + query: "Will Smith", + type: "cross_fields", + fields: ["first", "last"], + minimum_should_match: "50%", + }, + }, + { + multi_match: { + query: "Will Smith", + type: "cross_fields", + fields: ["*.edge"], + }, + }, + ], + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/56b6b50b174a935d368301ebd717231d.asciidoc b/docs/doc_examples/56b6b50b174a935d368301ebd717231d.asciidoc new file mode 100644 index 000000000..710fd7bb1 --- /dev/null +++ b/docs/doc_examples/56b6b50b174a935d368301ebd717231d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.stats({ + metric: "current_watches", +}); +console.log(response); +---- diff --git a/docs/doc_examples/56da252798b8e7b006738428aa1a7f4c.asciidoc b/docs/doc_examples/56da252798b8e7b006738428aa1a7f4c.asciidoc new file mode 100644 index 000000000..8f272acdb --- /dev/null +++ b/docs/doc_examples/56da252798b8e7b006738428aa1a7f4c.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + my_range: { + type: "long_range", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + my_range: { + gt: 200, + lt: 300, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/56da9c55774f4c2e8eadde0579bdc60c.asciidoc b/docs/doc_examples/56da9c55774f4c2e8eadde0579bdc60c.asciidoc new file mode 100644 index 000000000..aeffe4d01 --- /dev/null +++ b/docs/doc_examples/56da9c55774f4c2e8eadde0579bdc60c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test*", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + s: { + order: "asc", + numeric_type: "double", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/56db76c987106a870357854d3068ad98.asciidoc b/docs/doc_examples/56db76c987106a870357854d3068ad98.asciidoc new file mode 100644 index 000000000..3ae4090ca --- /dev/null +++ b/docs/doc_examples/56db76c987106a870357854d3068ad98.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_query_rules", +}); +console.log(response); +---- diff --git a/docs/doc_examples/56e90a63f94eeb882fe8acbcd74229c2.asciidoc b/docs/doc_examples/56e90a63f94eeb882fe8acbcd74229c2.asciidoc new file mode 100644 index 000000000..608ba2f7f --- /dev/null +++ b/docs/doc_examples/56e90a63f94eeb882fe8acbcd74229c2.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_moving_min: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.min(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/56f3a6bec7be5a90fb43144c331a5b5a.asciidoc b/docs/doc_examples/56f3a6bec7be5a90fb43144c331a5b5a.asciidoc new file mode 100644 index 000000000..ecc6eaa60 --- /dev/null +++ b/docs/doc_examples/56f3a6bec7be5a90fb43144c331a5b5a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + flat_settings: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/56fa6c9e08258157d445e2f92274962b.asciidoc b/docs/doc_examples/56fa6c9e08258157d445e2f92274962b.asciidoc new file mode 100644 index 000000000..47651fd9a --- /dev/null +++ b/docs/doc_examples/56fa6c9e08258157d445e2f92274962b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "shingle", + min_shingle_size: 2, + max_shingle_size: 3, + output_unigrams: false, + }, + ], + text: "quick brown fox jumps", +}); +console.log(response); +---- diff --git a/docs/doc_examples/571314a948e49f1f9614d36fcf79392a.asciidoc b/docs/doc_examples/571314a948e49f1f9614d36fcf79392a.asciidoc new file mode 100644 index 000000000..e70d505ba --- /dev/null +++ b/docs/doc_examples/571314a948e49f1f9614d36fcf79392a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.get({ + id: "FjktRGJ1Y2w1U0phLTRhZnVyeUZ2MVEbWEJyeVBPQldTV3FGZGdIeUVabXBldzo5NzA4", +}); +console.log(response); +---- diff --git a/docs/doc_examples/578808065fee8691355b8f25c35782cd.asciidoc b/docs/doc_examples/578808065fee8691355b8f25c35782cd.asciidoc new file mode 100644 index 000000000..ad2b22c3e --- /dev/null +++ b/docs/doc_examples/578808065fee8691355b8f25c35782cd.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + filter_path: "profile.shards.fetch", + profile: true, + query: { + term: { + "user.id": { + value: "elkbee", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5797df4b8e71d821a1488cbb63481104.asciidoc b/docs/doc_examples/5797df4b8e71d821a1488cbb63481104.asciidoc new file mode 100644 index 000000000..00b4a3abb --- /dev/null +++ b/docs/doc_examples/5797df4b8e71d821a1488cbb63481104.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.healthReport({ + feature: "shards_capacity", +}); +console.log(response); +---- diff --git a/docs/doc_examples/57a3e8d2ca64e37e90d658c4cd935399.asciidoc b/docs/doc_examples/57a3e8d2ca64e37e90d658c4cd935399.asciidoc new file mode 100644 index 000000000..22059e18c --- /dev/null +++ b/docs/doc_examples/57a3e8d2ca64e37e90d658c4cd935399.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "items", + query: { + bool: { + must: { + match: { + name: 
"chocolate", + }, + }, + should: { + distance_feature: { + field: "location", + pivot: "1000m", + origin: [-71.3, 41.15], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/57c690f8fa95bacf4b250803be7467e4.asciidoc b/docs/doc_examples/57c690f8fa95bacf4b250803be7467e4.asciidoc new file mode 100644 index 000000000..a09a17ef2 --- /dev/null +++ b/docs/doc_examples/57c690f8fa95bacf4b250803be7467e4.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "BBOX (1000.0, 1002.0, 2000.0, 1000.0)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc new file mode 100644 index 000000000..2e5e87c54 --- /dev/null +++ b/docs/doc_examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcPrepareAuthentication({ + realm: "oidc1", + state: "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", + nonce: "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5", +}); +console.log(response); +---- diff --git a/docs/doc_examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc b/docs/doc_examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc new file mode 100644 index 000000000..9babe8e35 --- /dev/null +++ b/docs/doc_examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["temp*"], + priority: 0, + template: { + settings: { + number_of_shards: 1, + number_of_replicas: 0, + }, + mappings: { + _source: { + enabled: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: "template_2", + index_patterns: ["template*"], + priority: 1, + template: { + settings: { + number_of_shards: 2, + }, + mappings: { + _source: { + enabled: true, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/582c4b05401dbc190b19411282d85310.asciidoc b/docs/doc_examples/582c4b05401dbc190b19411282d85310.asciidoc new file mode 100644 index 000000000..09b9fa2b8 --- /dev/null +++ b/docs/doc_examples/582c4b05401dbc190b19411282d85310.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: { + source: + "if (ctx._source.tags.contains(params['tag'])) { ctx.op = 'delete' } else { ctx.op = 'none' }", + lang: "painless", + params: { + tag: "green", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/582da02c09e0597b4396c87e33571e7b.asciidoc b/docs/doc_examples/582da02c09e0597b4396c87e33571e7b.asciidoc new file mode 100644 index 000000000..e5be2eaef --- /dev/null +++ b/docs/doc_examples/582da02c09e0597b4396c87e33571e7b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] 
+---- +const response = await client.sql.query({ + format: "json", + cursor: + "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc b/docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc new file mode 100644 index 000000000..12ea79855 --- /dev/null +++ b/docs/doc_examples/5836b09198feb1269ed12839b416123d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "jinaai-index", + query: { + semantic: { + field: "content", + query: "who inspired taking care of the sea?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc b/docs/doc_examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc new file mode 100644 index 000000000..b5d5d91f8 --- /dev/null +++ b/docs/doc_examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startTrainedModelDeployment({ + model_id: "my_model", + deployment_id: "my_model_for_search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/584f502cf840134f2db5f39e2483ced1.asciidoc b/docs/doc_examples/584f502cf840134f2db5f39e2483ced1.asciidoc new file mode 100644 index 000000000..15ffff9c6 --- /dev/null +++ b/docs/doc_examples/584f502cf840134f2db5f39e2483ced1.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "portuguese_example", + settings: { + analysis: { + filter: { + portuguese_stop: { + type: "stop", + stopwords: "_portuguese_", + }, + portuguese_keywords: { + type: "keyword_marker", + keywords: ["exemplo"], + }, + portuguese_stemmer: { + type: "stemmer", + language: "light_portuguese", + }, + }, + analyzer: { + rebuilt_portuguese: { + tokenizer: "standard", + filter: [ + "lowercase", + "portuguese_stop", + "portuguese_keywords", + "portuguese_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/585a34ad79aee16678b37da785933ac8.asciidoc b/docs/doc_examples/585a34ad79aee16678b37da785933ac8.asciidoc new file mode 100644 index 000000000..8e62c4d26 --- /dev/null +++ b/docs/doc_examples/585a34ad79aee16678b37da785933ac8.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.stop(); +console.log(response); +---- diff --git a/docs/doc_examples/585b19369cb9b9763a7e8d405f009a47.asciidoc b/docs/doc_examples/585b19369cb9b9763a7e8d405f009a47.asciidoc new file mode 100644 index 000000000..02e8855dc --- /dev/null +++ b/docs/doc_examples/585b19369cb9b9763a7e8d405f009a47.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + day_of_week: null, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/5865ca8d2bcd087ed5dbee33fafee57f.asciidoc b/docs/doc_examples/5865ca8d2bcd087ed5dbee33fafee57f.asciidoc new file mode 100644 index 000000000..9063935bf --- /dev/null +++ b/docs/doc_examples/5865ca8d2bcd087ed5dbee33fafee57f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.explainDataLifecycle({ + index: ".ds-my-data-stream-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/586cfa0e5fd695b7d451e854f9fb4a9c.asciidoc b/docs/doc_examples/586cfa0e5fd695b7d451e854f9fb4a9c.asciidoc new file mode 100644 index 000000000..0b83eb8c3 --- /dev/null +++ b/docs/doc_examples/586cfa0e5fd695b7d451e854f9fb4a9c.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_locations", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my_locations", + id: 1, + refresh: "true", + document: { + location: "POINT(4.912350 52.374081)", + city: "Amsterdam", + name: "NEMO Science Museum", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my_locations", + id: 2, + refresh: "true", + document: { + location: "POINT(4.405200 51.222900)", + city: "Antwerp", + name: "Letterenhuis", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my_locations", + id: 3, + refresh: "true", + document: { + location: "POINT(2.336389 48.861111)", + city: "Paris", + name: "Musée du Louvre", + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/58b5003c0a53a39bf509aa3797aad471.asciidoc b/docs/doc_examples/58b5003c0a53a39bf509aa3797aad471.asciidoc deleted file mode 100644 index 259d93687..000000000 --- a/docs/doc_examples/58b5003c0a53a39bf509aa3797aad471.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'content', - 'name.*^5' - ], - query: 'this AND that OR thus' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/58ca855be30049f8f0879e532db51ee2.asciidoc b/docs/doc_examples/58ca855be30049f8f0879e532db51ee2.asciidoc new file mode 100644 index 000000000..4212f2f60 --- /dev/null +++ b/docs/doc_examples/58ca855be30049f8f0879e532db51ee2.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "ecommerce_transform1", + source: { + index: "kibana_sample_data_ecommerce", + query: { + term: { + "geoip.continent_name": { + value: "Asia", + }, + }, + }, + }, + pivot: { + group_by: { + customer_id: { + terms: { + field: "customer_id", + missing_bucket: true, + }, + }, + }, + aggregations: { + max_price: { + max: { + field: "taxful_total_price", + }, + }, + }, + }, + description: "Maximum priced ecommerce data by customer_id in Asia", + dest: { + index: "kibana_sample_data_ecommerce_transform1", + pipeline: "add_timestamp_pipeline", + }, + frequency: "5m", + sync: 
{ + time: { + field: "order_date", + delay: "60s", + }, + }, + retention_policy: { + time: { + field: "order_date", + max_age: "30d", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/58dd26afc919722e21358c91e112b27a.asciidoc b/docs/doc_examples/58dd26afc919722e21358c91e112b27a.asciidoc new file mode 100644 index 000000000..61938b700 --- /dev/null +++ b/docs/doc_examples/58dd26afc919722e21358c91e112b27a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + range: { + date: { + gte: "2023-05-01", + lte: "2023-05-31", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/58df61acbfb15b8ef0aaa18b81ae98a6.asciidoc b/docs/doc_examples/58df61acbfb15b8ef0aaa18b81ae98a6.asciidoc deleted file mode 100644 index b4d71e244..000000000 --- a/docs/doc_examples/58df61acbfb15b8ef0aaa18b81ae98a6.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: "ctx._source.remove('new_field')" - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/58e684e0b771b4646662fe12d3060c05.asciidoc b/docs/doc_examples/58e684e0b771b4646662fe12d3060c05.asciidoc new file mode 100644 index 000000000..22c848908 --- /dev/null +++ b/docs/doc_examples/58e684e0b771b4646662fe12d3060c05.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cjk_width_example", + settings: { + analysis: { + analyzer: { + standard_cjk_width: { + tokenizer: "standard", + filter: ["cjk_width"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc b/docs/doc_examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc new file mode 100644 index 000000000..2fc9fec45 --- /dev/null +++ b/docs/doc_examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.indices.recovery": "DEBUG", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/591c7fb7451069829a14bba593136f1f.asciidoc b/docs/doc_examples/591c7fb7451069829a14bba593136f1f.asciidoc new file mode 100644 index 000000000..0b78c8eec --- /dev/null +++ b/docs/doc_examples/591c7fb7451069829a14bba593136f1f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.forecast({ + job_id: "low_request_rate", + duration: "10d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5969c446688c8b326acc80276573e9d2.asciidoc b/docs/doc_examples/5969c446688c8b326acc80276573e9d2.asciidoc new file mode 100644 index 000000000..7acc40c12 --- /dev/null +++ b/docs/doc_examples/5969c446688c8b326acc80276573e9d2.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + number_of_fragments: 3, + fragment_size: 150, + fields: { + body: { + pre_tags: ["<em>"], + post_tags: ["</em>"], + }, + "blog.title": { + number_of_fragments: 0, + }, + "blog.author": { + number_of_fragments: 0, + }, + "blog.comment": { + number_of_fragments: 5, + order: "score", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/59726e3c90e1218487a781508788c243.asciidoc b/docs/doc_examples/59726e3c90e1218487a781508788c243.asciidoc new file mode 100644 index 000000000..b895ddd4d --- /dev/null +++ b/docs/doc_examples/59726e3c90e1218487a781508788c243.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sale_date: { + auto_date_histogram: { + field: "date", + buckets: 10, + missing: "2000/01/01", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/597d456edfcb3d410954a3e9b5babf9a.asciidoc b/docs/doc_examples/597d456edfcb3d410954a3e9b5babf9a.asciidoc new file mode 100644 index 000000000..a57d2e49f --- /dev/null +++ b/docs/doc_examples/597d456edfcb3d410954a3e9b5babf9a.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + mappings: { + dynamic_templates: [ + { + strings: { + match_mapping_type: "string", + mapping: { + type: "keyword", + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5987afb2c17c73fe3d860937565ef115.asciidoc b/docs/doc_examples/5987afb2c17c73fe3d860937565ef115.asciidoc new file mode 100644 index 000000000..0107606a5 --- /dev/null +++ b/docs/doc_examples/5987afb2c17c73fe3d860937565ef115.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.openPointInTime({ + index: "my-index-000001", + keep_alive: "1m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/599454613ac699d447537e79e65ae35a.asciidoc b/docs/doc_examples/599454613ac699d447537e79e65ae35a.asciidoc new file mode 100644 index 000000000..3f7bd9397 --- /dev/null +++ b/docs/doc_examples/599454613ac699d447537e79e65ae35a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + script_fields: { + my_doubled_field: { + script: { + source: "doc['my_field'].value * params['multiplier']", + params: { + multiplier: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc b/docs/doc_examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc new file mode 100644 index 000000000..b478fbc11 --- /dev/null +++ b/docs/doc_examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.deleteTemplate({ + name: "my-legacy-index-template", +}); +console.log(response); +---- diff --git a/docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc b/docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc new file mode 100644 index 000000000..61ac89373 --- /dev/null +++ b/docs/doc_examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: ".reindexed-v9-ml-anomalies-custom-example", +}); +console.log(response); +---- diff --git a/docs/doc_examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc b/docs/doc_examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc new file mode 100644 index 000000000..56b04dfe7 --- /dev/null +++ b/docs/doc_examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.close({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/59d015f7bd0eeab40d0885010a62fa70.asciidoc b/docs/doc_examples/59d015f7bd0eeab40d0885010a62fa70.asciidoc new file mode 100644 index 000000000..9915745fb --- /dev/null +++ b/docs/doc_examples/59d015f7bd0eeab40d0885010a62fa70.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "example2", + indices: [ + { + names: ["my-index-000001"], + privileges: ["read"], + query: { + template: { + source: { + term: { + "group.id": "{{_user.metadata.group_id}}", + }, + }, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc b/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc new file mode 100644 index 000000000..5ea918642 --- /dev/null +++ b/docs/doc_examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "completion", + inference_id: "openai-completion", + inference_config: { + service: "openai", + service_settings: { + api_key: "<api_key>", + model_id: "gpt-3.5-turbo", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc b/docs/doc_examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc new file mode 100644 index 000000000..695b35e41 --- /dev/null +++ b/docs/doc_examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + index: "my-index-000001", + ids: ["1", "2"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5a006feed86309b547bbaa1baca1c496.asciidoc b/docs/doc_examples/5a006feed86309b547bbaa1baca1c496.asciidoc new file mode 100644 index 000000000..5ab3acc0b --- /dev/null +++ b/docs/doc_examples/5a006feed86309b547bbaa1baca1c496.asciidoc @@ -0,0 +1,69 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + numeric_counts: { + match_mapping_type: ["long", "double"], + match: "count", + mapping: { + type: "{dynamic_type}", + index: false, + }, + }, + }, + { + integers: { + match_mapping_type: "long", + mapping: { + type: "integer", + }, + }, + }, + { + strings: { + match_mapping_type: "string", + mapping: { + type: "text", + fields: { + raw: { + type: "keyword", + ignore_above: 256, + }, + }, + }, + }, + }, + { + non_objects_keyword: { + match_mapping_type: "*", + unmatch_mapping_type: "object", + mapping: { + type: "keyword", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_integer: 5, + my_string: "Some string", + my_boolean: "false", + field: { + count: 4, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5a3855f1b3e37d89ab7cbcc4f7ae1dd3.asciidoc b/docs/doc_examples/5a3855f1b3e37d89ab7cbcc4f7ae1dd3.asciidoc new file mode 100644 index 000000000..a0d7ddaaf --- /dev/null +++ b/docs/doc_examples/5a3855f1b3e37d89ab7cbcc4f7ae1dd3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "limit", + max_token_count: 2, + }, + ], + text: "quick fox jumps over lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5a3fe9584d203d1fd6c96981ba34e0de.asciidoc b/docs/doc_examples/5a3fe9584d203d1fd6c96981ba34e0de.asciidoc new file mode 100644 index 000000000..bb83361b8 --- /dev/null +++ b/docs/doc_examples/5a3fe9584d203d1fd6c96981ba34e0de.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "postal_codes", + mappings: { + properties: { + location: { + type: "geo_shape", + }, + postal_code: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5a6bb9ac6830668ecc00550c1aa8f2f1.asciidoc b/docs/doc_examples/5a6bb9ac6830668ecc00550c1aa8f2f1.asciidoc new file mode 100644 index 000000000..ab820fab7 --- /dev/null +++ b/docs/doc_examples/5a6bb9ac6830668ecc00550c1aa8f2f1.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "logstash-reader", + indices: [ + { + names: ["logstash-*"], + privileges: ["read_cross_cluster", "read", "view_index_metadata"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5a754dcc854b9154296550a0b581cb9d.asciidoc b/docs/doc_examples/5a754dcc854b9154296550a0b581cb9d.asciidoc new file mode 100644 index 000000000..02bc82b96 --- /dev/null +++ b/docs/doc_examples/5a754dcc854b9154296550a0b581cb9d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "network-traffic", + size: 0, + aggs: { + "ipv4-subnets": { + ip_prefix: { + field: "ipv4", + prefix_length: 24, + }, + }, + }, +}); +console.log(response); 
+---- diff --git a/docs/doc_examples/5a7f05ab1d05b4eef5ff327168517165.asciidoc b/docs/doc_examples/5a7f05ab1d05b4eef5ff327168517165.asciidoc new file mode 100644 index 000000000..e163c01b6 --- /dev/null +++ b/docs/doc_examples/5a7f05ab1d05b4eef5ff327168517165.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + routing: "my-routing-value,my-routing-value-2", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5ab9b44939fb30f5b4adbdcc4bcc0733.asciidoc b/docs/doc_examples/5ab9b44939fb30f5b4adbdcc4bcc0733.asciidoc new file mode 100644 index 000000000..090b5022a --- /dev/null +++ b/docs/doc_examples/5ab9b44939fb30f5b4adbdcc4bcc0733.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "datastream_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_age: "5m", + }, + downsample: { + fixed_interval: "1h", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc b/docs/doc_examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc new file mode 100644 index 000000000..cd5c996c6 --- /dev/null +++ b/docs/doc_examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping5", + role_templates: [ + { + template: { + source: "{{#tojson}}groups{{/tojson}}", + }, + format: "json", + }, + ], + rules: { + field: { + "realm.name": "saml1", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5afbd9caed88c32f8a2968c07054f096.asciidoc b/docs/doc_examples/5afbd9caed88c32f8a2968c07054f096.asciidoc new file mode 100644 index 000000000..9b7bf7d52 --- /dev/null +++ b/docs/doc_examples/5afbd9caed88c32f8a2968c07054f096.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.logstash.deletePipeline({ + id: "my_pipeline", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc b/docs/doc_examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc new file mode 100644 index 000000000..e110d8fa3 --- /dev/null +++ b/docs/doc_examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.list({ + from: 0, + size: 3, + q: "app*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc b/docs/doc_examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc new file mode 100644 index 000000000..91150c314 --- /dev/null +++ b/docs/doc_examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, 
js] +---- +const response = await client.security.getUserPrivileges(); +console.log(response); +---- diff --git a/docs/doc_examples/5b1ae98ad03e2819fc7c3468840ef448.asciidoc b/docs/doc_examples/5b1ae98ad03e2819fc7c3468840ef448.asciidoc new file mode 100644 index 000000000..4caea0a08 --- /dev/null +++ b/docs/doc_examples/5b1ae98ad03e2819fc7c3468840ef448.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-index*", + query: + "\n sample by host\n [any where uptime > 0]\n [any where port > 100]\n [any where bool == true]\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b266deba5396c7810af1b8315c23596.asciidoc b/docs/doc_examples/5b266deba5396c7810af1b8315c23596.asciidoc new file mode 100644 index 000000000..76ad3f0dc --- /dev/null +++ b/docs/doc_examples/5b266deba5396c7810af1b8315c23596.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + size: 0, + aggs: { + grouped: { + geohash_grid: { + field: "location", + precision: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b281956e35a26e734c482b42b356c0d.asciidoc b/docs/doc_examples/5b281956e35a26e734c482b42b356c0d.asciidoc new file mode 100644 index 000000000..54aa6736a --- /dev/null +++ b/docs/doc_examples/5b281956e35a26e734c482b42b356c0d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.existsAlias({ + name: "my-alias", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc b/docs/doc_examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc new file mode 100644 index 000000000..ffa909eaf --- /dev/null +++ b/docs/doc_examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_1", + template: { + settings: { + number_of_shards: 1, + }, + }, + _meta: { + description: "set number of shards to one", + serialization: { + class: "MyComponentTemplate", + id: 10, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b3384992c398ea8a3064d2e08725e2b.asciidoc b/docs/doc_examples/5b3384992c398ea8a3064d2e08725e2b.asciidoc new file mode 100644 index 000000000..b427412d8 --- /dev/null +++ b/docs/doc_examples/5b3384992c398ea8a3064d2e08725e2b.asciidoc @@ -0,0 +1,77 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "node", + mappings: { + properties: { + ip: { + type: "ip", + }, + date: { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "node", + refresh: "true", + operations: [ + { + index: {}, + }, + { + ip: "192.168.0.1", + date: "2020-01-01T01:01:01", + m: 1, + }, + { + index: {}, + }, + { + ip: "192.168.0.1", + date: "2020-01-01T02:01:01", + m: 2, + }, + { + index: {}, + }, + { + 
ip: "192.168.0.2", + date: "2020-01-01T02:01:01", + m: 3, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "node", + filter_path: "aggregations", + aggs: { + ip: { + terms: { + field: "ip", + }, + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + date: "desc", + }, + }, + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/5b58007f10700ec7934580f034404652.asciidoc b/docs/doc_examples/5b58007f10700ec7934580f034404652.asciidoc new file mode 100644 index 000000000..0385b42c4 --- /dev/null +++ b/docs/doc_examples/5b58007f10700ec7934580f034404652.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.create({ + index: "my-index-000001", + id: 1, + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b6bc085943e9189236d98b3c05ed62c.asciidoc b/docs/doc_examples/5b6bc085943e9189236d98b3c05ed62c.asciidoc new file mode 100644 index 000000000..3cb27b100 --- /dev/null +++ b/docs/doc_examples/5b6bc085943e9189236d98b3c05ed62c.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "25GB", + }, + }, + }, + delete: { + min_age: "30d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc b/docs/doc_examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc new file mode 100644 index 000000000..8a1350c9b --- /dev/null +++ b/docs/doc_examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "*", +}); +console.log(response); + +const response1 = await client.indices.getMapping({ + index: "_all", +}); +console.log(response1); + +const response2 = await client.indices.getMapping(); +console.log(response2); +---- diff --git a/docs/doc_examples/5b8119b4d9a09f4643be5a5b40875c8f.asciidoc b/docs/doc_examples/5b8119b4d9a09f4643be5a5b40875c8f.asciidoc new file mode 100644 index 000000000..f7dbd3f5e --- /dev/null +++ b/docs/doc_examples/5b8119b4d9a09f4643be5a5b40875c8f.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + is_published: true, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + is_published: false, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + aggs: { + publish_state: { + terms: { + field: "is_published", + }, + }, + }, + sort: ["is_published"], + fields: [ + { + field: "weight", + }, + ], + runtime_mappings: { + weight: { + type: "long", + script: 
"emit(doc['is_published'].value ? 10 : 0)", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/5bb0d84185df2f276f01bb2fba709e1a.asciidoc b/docs/doc_examples/5bb0d84185df2f276f01bb2fba709e1a.asciidoc new file mode 100644 index 000000000..8e695a1e8 --- /dev/null +++ b/docs/doc_examples/5bb0d84185df2f276f01bb2fba709e1a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "cluster_one:my-data-stream,cluster_two:my-data-stream", + query: '\n process where process.name == "regsvr32.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc new file mode 100644 index 000000000..91478f094 --- /dev/null +++ b/docs/doc_examples/5bba213a7f543190139d1a69ab2ed076.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQuery({ + format: "json", + query: + "\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + include_ccs_metadata: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5bbccf103107e505c17ae59863753efd.asciidoc b/docs/doc_examples/5bbccf103107e505c17ae59863753efd.asciidoc new file mode 100644 index 000000000..160980a8a --- /dev/null +++ b/docs/doc_examples/5bbccf103107e505c17ae59863753efd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getInfluencers({ + job_id: "high_sum_total_sales", + sort: "influencer_score", + desc: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5be23858b35043fcb7b50fe36b873e6e.asciidoc b/docs/doc_examples/5be23858b35043fcb7b50fe36b873e6e.asciidoc deleted file mode 100644 index 9fc13cadc..000000000 --- a/docs/doc_examples/5be23858b35043fcb7b50fe36b873e6e.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'twitter', - body: { - properties: { - email: { - type: 'keyword' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5c187ba92dd1678fda86b5eec8cc7421.asciidoc b/docs/doc_examples/5c187ba92dd1678fda86b5eec8cc7421.asciidoc new file mode 100644 index 000000000..c077c6112 --- /dev/null +++ b/docs/doc_examples/5c187ba92dd1678fda86b5eec8cc7421.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + filter: { + script: { + script: + "\n double amount = doc['amount'].value;\n if (doc['type'].value == 'expense') {\n amount *= -1;\n }\n return amount < 10;\n ", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c22172a944864a7d138decdc08558b4.asciidoc b/docs/doc_examples/5c22172a944864a7d138decdc08558b4.asciidoc new file mode 100644 index 000000000..8c0f5c121 --- /dev/null +++ 
b/docs/doc_examples/5c22172a944864a7d138decdc08558b4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + index: "my-data-stream", + v: "true", + h: "health,status,index,docs.count", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c249eaeb99e6aee07162128288ac1b1.asciidoc b/docs/doc_examples/5c249eaeb99e6aee07162128288ac1b1.asciidoc new file mode 100644 index 000000000..a1c117415 --- /dev/null +++ b/docs/doc_examples/5c249eaeb99e6aee07162128288ac1b1.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_percentile: { + percentiles: { + field: "price", + percents: [1, 99], + }, + }, + the_movperc: { + moving_percentiles: { + buckets_path: "the_percentile", + window: 10, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c24a9a0ddbfa50628dacdb9d25f7ab0.asciidoc b/docs/doc_examples/5c24a9a0ddbfa50628dacdb9d25f7ab0.asciidoc new file mode 100644 index 000000000..40a5a3b59 --- /dev/null +++ b/docs/doc_examples/5c24a9a0ddbfa50628dacdb9d25f7ab0.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grades_stats: { + extended_stats: { + field: "grade", + missing: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c2f486c27bd5346e512265f93375d16.asciidoc b/docs/doc_examples/5c2f486c27bd5346e512265f93375d16.asciidoc index 8455b50c9..cad6dce99 100644 --- a/docs/doc_examples/5c2f486c27bd5346e512265f93375d16.asciidoc +++ b/docs/doc_examples/5c2f486c27bd5346e512265f93375d16.asciidoc @@ -4,18 +4,15 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - range: { - timestamp: { - time_zone: '+01:00', - gte: '2020-01-01T00:00:00', - lte: 'now' - } - } - } - } -}) -console.log(response) + query: { + range: { + timestamp: { + time_zone: "+01:00", + gte: "2020-01-01T00:00:00", + lte: "now", + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5c6fbeac20dc23b613847f35d431ecab.asciidoc b/docs/doc_examples/5c6fbeac20dc23b613847f35d431ecab.asciidoc new file mode 100644 index 000000000..3d20e9ceb --- /dev/null +++ b/docs/doc_examples/5c6fbeac20dc23b613847f35d431ecab.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + functions: [ + { + gauss: { + price: { + origin: "0", + scale: "20", + }, + }, + }, + { + gauss: { + location: { + origin: "11, 12", + scale: "2km", + }, + }, + }, + ], + query: { + match: { + properties: "balcony", + }, + }, + score_mode: "multiply", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c7ece1f30267adabdb832424871900a.asciidoc b/docs/doc_examples/5c7ece1f30267adabdb832424871900a.asciidoc new file mode 100644 index 000000000..68956e123 --- /dev/null +++ 
b/docs/doc_examples/5c7ece1f30267adabdb832424871900a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5c8ac24dd56e85d8f3f6705ec3c6dc32.asciidoc b/docs/doc_examples/5c8ac24dd56e85d8f3f6705ec3c6dc32.asciidoc new file mode 100644 index 000000000..ce39cc7bb --- /dev/null +++ b/docs/doc_examples/5c8ac24dd56e85d8f3f6705ec3c6dc32.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "circles", + mappings: { + properties: { + circle: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.ingest.putPipeline({ + id: "polygonize_circles", + description: "translate circle to polygon", + processors: [ + { + circle: { + field: "circle", + error_distance: 28, + shape_type: "geo_shape", + }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc b/docs/doc_examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc new file mode 100644 index 000000000..cb0182201 --- /dev/null +++ b/docs/doc_examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getDataFrameAnalytics({ + id: "loganalytics", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5cd792dff7d5891c33bef098d9338ce1.asciidoc b/docs/doc_examples/5cd792dff7d5891c33bef098d9338ce1.asciidoc new file mode 100644 index 000000000..8570b8d2d --- /dev/null +++ b/docs/doc_examples/5cd792dff7d5891c33bef098d9338ce1.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + title: { + type: "text", + store: true, + }, + date: { + type: "date", + store: true, + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + title: "Some short title", + date: "2015-01-01", + content: "A very long content field...", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + stored_fields: ["title", "date"], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc b/docs/doc_examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc new file mode 100644 index 000000000..e2a1539c1 --- /dev/null +++ b/docs/doc_examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "small_chunk_size", + inference_config: { + service: "elasticsearch", + service_settings: { + num_allocations: 1, + num_threads: 1, + }, + chunking_settings: { + strategy: "sentence", + max_chunk_size: 100, + sentence_overlap: 0, + 
}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc b/docs/doc_examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc new file mode 100644 index 000000000..423a609ea --- /dev/null +++ b/docs/doc_examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5cfab507e50d8c5182939412a9dbcdc8.asciidoc b/docs/doc_examples/5cfab507e50d8c5182939412a9dbcdc8.asciidoc new file mode 100644 index 000000000..44f1922f2 --- /dev/null +++ b/docs/doc_examples/5cfab507e50d8c5182939412a9dbcdc8.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "places", + mappings: { + properties: { + geometry: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "places", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + name: "NEMO Science Museum", + geometry: "POINT(4.912350 52.374081)", + }, + { + index: { + _id: 2, + }, + }, + { + name: "Sportpark De Weeren", + geometry: { + type: "Polygon", + coordinates: [ + [ + [4.965305328369141, 52.39347642069457], + [4.966979026794433, 52.391721758934835], + [4.969425201416015, 52.39238958618537], + [4.967944622039794, 52.39420969150824], + [4.965305328369141, 52.39347642069457], + ], + ], + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "places", + size: 0, + aggs: { + centroid: { + geo_centroid: { + field: "geometry", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/5d03bb385904d20c5323885706738459.asciidoc b/docs/doc_examples/5d03bb385904d20c5323885706738459.asciidoc new file mode 100644 index 000000000..f2bc5d0b8 --- /dev/null +++ b/docs/doc_examples/5d03bb385904d20c5323885706738459.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "my-data-stream", + alias: "my-alias", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d3ee81bcf6ad57f39052c9065963cc3.asciidoc b/docs/doc_examples/5d3ee81bcf6ad57f39052c9065963cc3.asciidoc new file mode 100644 index 000000000..6079dac32 --- /dev/null +++ b/docs/doc_examples/5d3ee81bcf6ad57f39052c9065963cc3.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test_index", + mappings: { + dynamic: "strict", + properties: { + description: { + properties: { + notes: { + type: "text", + copy_to: ["description.notes_raw"], + analyzer: "standard", + search_analyzer: "standard", + }, + notes_raw: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d428ea66252fd252b6a8d6f47605c86.asciidoc 
b/docs/doc_examples/5d428ea66252fd252b6a8d6f47605c86.asciidoc new file mode 100644 index 000000000..29e5b0bf4 --- /dev/null +++ b/docs/doc_examples/5d428ea66252fd252b6a8d6f47605c86.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cjk_bigram_example", + settings: { + analysis: { + analyzer: { + han_bigrams: { + tokenizer: "standard", + filter: ["han_bigrams_filter"], + }, + }, + filter: { + han_bigrams_filter: { + type: "cjk_bigram", + ignored_scripts: ["hangul", "hiragana", "katakana"], + output_unigrams: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d5b06468c54308f52c212cca5d58fef.asciidoc b/docs/doc_examples/5d5b06468c54308f52c212cca5d58fef.asciidoc new file mode 100644 index 000000000..5c8ee2a05 --- /dev/null +++ b/docs/doc_examples/5d5b06468c54308f52c212cca5d58fef.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + cursor: + "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl+v///w8=", + columnar: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d5cdbd4c5c62a90ff2a39cba4a59368.asciidoc b/docs/doc_examples/5d5cdbd4c5c62a90ff2a39cba4a59368.asciidoc new file mode 100644 index 000000000..5b625185e --- /dev/null +++ b/docs/doc_examples/5d5cdbd4c5c62a90ff2a39cba4a59368.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + elser: true, + text: true, + query_string: "where is the best mountain climbing?", + elser_fields: [ + { + name: "title", + boost: 1, + }, + { + name: "description", + boost: 1, + }, + ], + text_query_boost: 4, + min_score: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d689d74062cddd01a0711a2fa7f23fd.asciidoc b/docs/doc_examples/5d689d74062cddd01a0711a2fa7f23fd.asciidoc new file mode 100644 index 000000000..ca39dd250 --- /dev/null +++ b/docs/doc_examples/5d689d74062cddd01a0711a2fa7f23fd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.transport.TransportService.tracer": "TRACE", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d7980d8c745abf7ea0fa573e818bd5b.asciidoc b/docs/doc_examples/5d7980d8c745abf7ea0fa573e818bd5b.asciidoc new file mode 100644 index 000000000..5d0a781cd --- /dev/null +++ b/docs/doc_examples/5d7980d8c745abf7ea0fa573e818bd5b.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + en: { + tokenizer: "standard", + filter: ["my_shingle_filter"], + }, + }, + filter: { + my_shingle_filter: { + type: "shingle", + min_shingle_size: 2, + 
max_shingle_size: 5, + output_unigrams: false, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc b/docs/doc_examples/5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc index 78aaba5e1..3f404df64 100644 --- a/docs/doc_examples/5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc +++ b/docs/doc_examples/5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc @@ -4,32 +4,29 @@ [source, js] ---- const response = await client.search({ - body: { - size: 0, - aggs: { - expired_sessions: { - terms: { - field: 'account_id', - include: { - partition: 0, - num_partitions: 20 + size: 0, + aggs: { + expired_sessions: { + terms: { + field: "account_id", + include: { + partition: 0, + num_partitions: 20, + }, + size: 10000, + order: { + last_access: "asc", + }, + }, + aggs: { + last_access: { + max: { + field: "access_date", }, - size: 10000, - order: { - last_access: 'asc' - } }, - aggs: { - last_access: { - max: { - field: 'access_date' - } - } - } - } - } - } -}) -console.log(response) + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5da6efd5b038ada64c9e853c88c1ec47.asciidoc b/docs/doc_examples/5da6efd5b038ada64c9e853c88c1ec47.asciidoc index 6157dc7f0..e82ef5dc8 100644 --- a/docs/doc_examples/5da6efd5b038ada64c9e853c88c1ec47.asciidoc +++ b/docs/doc_examples/5da6efd5b038ada64c9e853c88c1ec47.asciidoc @@ -4,20 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'brown fox', - type: 'best_fields', - fields: [ - 'subject', - 'message' - ], - tie_breaker: 0.3 - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "brown fox", + type: "best_fields", + fields: ["subject", "message"], + tie_breaker: 0.3, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5daf8ede198be9b118da5bee9896cb00.asciidoc b/docs/doc_examples/5daf8ede198be9b118da5bee9896cb00.asciidoc new file mode 100644 index 000000000..d554caaf0 --- /dev/null +++ b/docs/doc_examples/5daf8ede198be9b118da5bee9896cb00.asciidoc @@ -0,0 +1,47 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + flattened: { + type: "flattened", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + flattened: { + field: [ + "apple", + "apple", + "banana", + "avocado", + "10", + "200", + "AVOCADO", + "Banana", + "Tangerine", + ], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/5dbf06ca9058843f572676fcaf587f75.asciidoc b/docs/doc_examples/5dbf06ca9058843f572676fcaf587f75.asciidoc new file mode 100644 index 000000000..0b9e73b16 --- /dev/null +++ b/docs/doc_examples/5dbf06ca9058843f572676fcaf587f75.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + prices: { + variable_width_histogram: { + field: "price", + buckets: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5ddc26da6e163fda54f52d33b5157051.asciidoc b/docs/doc_examples/5ddc26da6e163fda54f52d33b5157051.asciidoc new file mode 100644 
index 000000000..30bdd7f18 --- /dev/null +++ b/docs/doc_examples/5ddc26da6e163fda54f52d33b5157051.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + sparse_vector: { + field: "my_tokens", + inference_id: "my-elser-endpoint", + query: "the query string", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc b/docs/doc_examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc new file mode 100644 index 000000000..e5c8b337a --- /dev/null +++ b/docs/doc_examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkDeleteRole({ + names: ["my_admin_role", "not_an_existing_role"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5df3226fdc8f1f66ae92ba2f527af8c0.asciidoc b/docs/doc_examples/5df3226fdc8f1f66ae92ba2f527af8c0.asciidoc new file mode 100644 index 000000000..35334cd9c --- /dev/null +++ b/docs/doc_examples/5df3226fdc8f1f66ae92ba2f527af8c0.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_field: 5, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc b/docs/doc_examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc new file mode 100644 index 000000000..db5e254b7 --- /dev/null +++ b/docs/doc_examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.recovery({ + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5dfe24287bb930ad33345caf092a004b.asciidoc b/docs/doc_examples/5dfe24287bb930ad33345caf092a004b.asciidoc new file mode 100644 index 000000000..c35b3e1f3 --- /dev/null +++ b/docs/doc_examples/5dfe24287bb930ad33345caf092a004b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must_not: { + exists: { + field: "user.id", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e021307d331a4483a5aa2198168451b.asciidoc b/docs/doc_examples/5e021307d331a4483a5aa2198168451b.asciidoc new file mode 100644 index 000000000..329574d90 --- /dev/null +++ b/docs/doc_examples/5e021307d331a4483a5aa2198168451b.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "only_remote_access_role", + remote_indices: [ + { + clusters: ["my_remote"], + names: ["logs*"], + privileges: ["read", "read_cross_cluster", "view_index_metadata"], + }, + ], + remote_cluster: [ + { + clusters: ["my_remote"], + privileges: ["monitor_stats"], + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc b/docs/doc_examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc new file mode 100644 index 000000000..217e7631a --- /dev/null +++ b/docs/doc_examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + request_cache: "true", + size: 0, + aggs: { + popular_colors: { + terms: { + field: "colors", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e124875d97c27362ae858160ae1c6d5.asciidoc b/docs/doc_examples/5e124875d97c27362ae858160ae1c6d5.asciidoc new file mode 100644 index 000000000..4f049addd --- /dev/null +++ b/docs/doc_examples/5e124875d97c27362ae858160ae1c6d5.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.getAutoFollowPattern(); +console.log(response); +---- diff --git a/docs/doc_examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc b/docs/doc_examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc new file mode 100644 index 000000000..cee356963 --- /dev/null +++ b/docs/doc_examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "jwt1_users", + refresh: "true", + roles: ["user"], + rules: { + all: [ + { + field: { + "realm.name": "jwt1", + }, + }, + { + field: { + username: "principalname1", + }, + }, + { + field: { + dn: "CN=Principal Name 1,DC=example.com", + }, + }, + { + field: { + groups: "group1", + }, + }, + { + field: { + "metadata.jwt_claim_other": "other1", + }, + }, + ], + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e2f7097eb299de553d0fa0087d70a59.asciidoc b/docs/doc_examples/5e2f7097eb299de553d0fa0087d70a59.asciidoc new file mode 100644 index 000000000..0f146561b --- /dev/null +++ b/docs/doc_examples/5e2f7097eb299de553d0fa0087d70a59.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + "sort.field": ["username", "timestamp"], + "sort.order": ["asc", "desc"], + }, + }, + mappings: { + properties: { + username: { + type: "keyword", + doc_values: true, + }, + timestamp: { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e3673bcbef5731746e400c4f3fe134d.asciidoc b/docs/doc_examples/5e3673bcbef5731746e400c4f3fe134d.asciidoc new file mode 100644 index 000000000..b68e17a38 --- /dev/null +++ b/docs/doc_examples/5e3673bcbef5731746e400c4f3fe134d.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test", + id: 1, + document: { + location: [ + { + coordinates: [46.25, 20.14], + type: "point", + }, + { + coordinates: [47.49, 19.04], + type: "point", + }, + ], + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/5e415c490a46358643ee2aab554b4876.asciidoc b/docs/doc_examples/5e415c490a46358643ee2aab554b4876.asciidoc new file mode 100644 index 000000000..9b9aeab08 --- /dev/null +++ b/docs/doc_examples/5e415c490a46358643ee2aab554b4876.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + filter_path: + "index,node_allocation_decisions.node_name,node_allocation_decisions.deciders.*", + index: "my-index", + shard: 0, + primary: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e47a407b6ca29dadf6eac5ab1d71163.asciidoc b/docs/doc_examples/5e47a407b6ca29dadf6eac5ab1d71163.asciidoc new file mode 100644 index 000000000..403e117a9 --- /dev/null +++ b/docs/doc_examples/5e47a407b6ca29dadf6eac5ab1d71163.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_polygon: { + "person.location": { + points: [ + { + lat: 40, + lon: -70, + }, + { + lat: 30, + lon: -80, + }, + { + lat: 20, + lon: -90, + }, + ], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e6419bc3e2db0d0f05bce58d8cc9215.asciidoc b/docs/doc_examples/5e6419bc3e2db0d0f05bce58d8cc9215.asciidoc new file mode 100644 index 000000000..2527dc592 --- /dev/null +++ b/docs/doc_examples/5e6419bc3e2db0d0f05bce58d8cc9215.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + rename: { + description: "Rename 'provider' to 'cloud.provider'", + field: "provider", + target_field: "cloud.provider", + on_failure: [ + { + set: { + description: "Set 'error.message'", + field: "error.message", + value: + "Field 'provider' does not exist. 
Cannot rename to 'cloud.provider'", + override: false, + }, + }, + ], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e87dd38ac3a0fd59ad794005b16d13e.asciidoc b/docs/doc_examples/5e87dd38ac3a0fd59ad794005b16d13e.asciidoc new file mode 100644 index 000000000..5f9ace6d7 --- /dev/null +++ b/docs/doc_examples/5e87dd38ac3a0fd59ad794005b16d13e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getLifecycle({ + policy_id: "nightly-snapshots", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5e9a7845e60b79685aab59877c5fbd1a.asciidoc b/docs/doc_examples/5e9a7845e60b79685aab59877c5fbd1a.asciidoc new file mode 100644 index 000000000..2639ad24b --- /dev/null +++ b/docs/doc_examples/5e9a7845e60b79685aab59877c5fbd1a.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + ignored_fields: { + terms: { + field: "_ignored", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc b/docs/doc_examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc new file mode 100644 index 000000000..edead60ff --- /dev/null +++ b/docs/doc_examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + comment: { + type: "text", + index_options: "offsets", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5eabcdbf61bfcb484dc694f25c2bba36.asciidoc b/docs/doc_examples/5eabcdbf61bfcb484dc694f25c2bba36.asciidoc deleted file mode 100644 index cbd1a9889..000000000 --- a/docs/doc_examples/5eabcdbf61bfcb484dc694f25c2bba36.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '1', - body: { - counter: 1, - tags: [ - 'red' - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5f031b7bd2b7d98d2d10df7420d269ff.asciidoc b/docs/doc_examples/5f031b7bd2b7d98d2d10df7420d269ff.asciidoc new file mode 100644 index 000000000..e364f163d --- /dev/null +++ b/docs/doc_examples/5f031b7bd2b7d98d2d10df7420d269ff.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveIndex({ + name: "new-data-stream*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc b/docs/doc_examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc new file mode 100644 index 000000000..454dd9502 --- /dev/null +++ b/docs/doc_examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + inference_field: { + 
type: "semantic_text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc b/docs/doc_examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc new file mode 100644 index 000000000..34cc9739c --- /dev/null +++ b/docs/doc_examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user_id: { + type: "keyword", + ignore_above: 20, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f210f74725ea0c9265190346edfa246.asciidoc b/docs/doc_examples/5f210f74725ea0c9265190346edfa246.asciidoc deleted file mode 100644 index 9f05c2947..000000000 --- a/docs/doc_examples/5f210f74725ea0c9265190346edfa246.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - indices: [ - 'test1', - 'test2' - ], - alias: 'alias1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5f3373887e8d3dc31239b687a5151449.asciidoc b/docs/doc_examples/5f3373887e8d3dc31239b687a5151449.asciidoc new file mode 100644 index 000000000..92ac2ee4b --- /dev/null +++ b/docs/doc_examples/5f3373887e8d3dc31239b687a5151449.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + number_one: { + type: "integer", + }, + number_two: { + type: "integer", + coerce: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + number_one: "10", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + number_two: "10", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc b/docs/doc_examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc index 5a623c1ef..64e8fd61e 100644 --- a/docs/doc_examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc +++ b/docs/doc_examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.search({ - index: 'index_long,index_double', - body: { - sort: [ - { - field: { - numeric_type: 'date_nanos' - } - } - ] - } -}) -console.log(response) + index: "index_long,index_double", + sort: [ + { + field: { + numeric_type: "date_nanos", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/5f3a3eefeefe6fa85ec49d499212d245.asciidoc b/docs/doc_examples/5f3a3eefeefe6fa85ec49d499212d245.asciidoc deleted file mode 100644 index 2e6c1e650..000000000 --- a/docs/doc_examples/5f3a3eefeefe6fa85ec49d499212d245.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'my_index', - body: { - properties: { - city: { - type: 'text', - fields: { - raw: { - type: 
'keyword' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/5f72ab800c3db9d118df95e2a378d411.asciidoc b/docs/doc_examples/5f72ab800c3db9d118df95e2a378d411.asciidoc new file mode 100644 index 000000000..caf925227 --- /dev/null +++ b/docs/doc_examples/5f72ab800c3db9d118df95e2a378d411.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: ".ds-my-data-stream-2099.03.09-000003", + id: 2, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f7b59d4fad0bdce6b09abb520ddb51d.asciidoc b/docs/doc_examples/5f7b59d4fad0bdce6b09abb520ddb51d.asciidoc new file mode 100644 index 000000000..997f41a37 --- /dev/null +++ b/docs/doc_examples/5f7b59d4fad0bdce6b09abb520ddb51d.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-data-stream", + runtime_mappings: { + "source.ip": { + type: "ip", + script: + "\n String sourceip=grok('%{IPORHOST:sourceip} .*').extract(doc[ \"message\" ].value)?.sourceip;\n if (sourceip != null) emit(sourceip);\n ", + }, + }, + query: { + bool: { + filter: [ + { + range: { + "@timestamp": { + gte: "now-1d/d", + lt: "now/d", + }, + }, + }, + { + range: { + "source.ip": { + gte: "192.0.2.0", + lte: "192.0.2.255", + }, + }, + }, + ], + }, + }, + fields: ["*"], + _source: false, + sort: [ + { + "@timestamp": "desc", + }, + { + "source.ip": "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc b/docs/doc_examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc new file mode 100644 index 000000000..c04d41ee5 --- /dev/null +++ b/docs/doc_examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "hyphenation_decompound_example", + settings: { + analysis: { + analyzer: { + standard_hyphenation_decompound: { + tokenizer: "standard", + filter: ["22_char_hyphenation_decompound"], + }, + }, + filter: { + "22_char_hyphenation_decompound": { + type: "hyphenation_decompounder", + word_list_path: "analysis/example_word_list.txt", + hyphenation_patterns_path: "analysis/hyphenation_patterns.xml", + max_subword_size: 22, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5f8fb5513d4f725434db2f517ad4298f.asciidoc b/docs/doc_examples/5f8fb5513d4f725434db2f517ad4298f.asciidoc new file mode 100644 index 000000000..fb2dc80e0 --- /dev/null +++ b/docs/doc_examples/5f8fb5513d4f725434db2f517ad4298f.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + number_of_shards: 1, + similarity: { + scripted_tfidf: { + type: "scripted", + weight_script: { + source: + "double idf = Math.log((field.docCount+1.0)/(term.docFreq+1.0)) + 1.0; return query.boost * idf;", + }, + script: { + source: + "double tf = Math.sqrt(doc.freq); double norm = 1/Math.sqrt(doc.length); return weight * tf * norm;", + }, + }, + }, + }, + mappings: { + properties: { + 
field: { + type: "text", + similarity: "scripted_tfidf", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5faa121e00a0582160b2adb2b72fed67.asciidoc b/docs/doc_examples/5faa121e00a0582160b2adb2b72fed67.asciidoc new file mode 100644 index 000000000..6ef255e94 --- /dev/null +++ b/docs/doc_examples/5faa121e00a0582160b2adb2b72fed67.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "log_2099_-*", + name: "index.number_*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc b/docs/doc_examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc new file mode 100644 index 000000000..d3f030697 --- /dev/null +++ b/docs/doc_examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getCalendars({ + calendar_id: "planned-outages", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5fd002a018c589eb73fadad25889dbe9.asciidoc b/docs/doc_examples/5fd002a018c589eb73fadad25889dbe9.asciidoc new file mode 100644 index 000000000..d3d537545 --- /dev/null +++ b/docs/doc_examples/5fd002a018c589eb73fadad25889dbe9.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_query_rules/my-ruleset", + body: { + rules: [ + { + rule_id: "rule1", + type: "pinned", + criteria: [ + { + type: "fuzzy", + metadata: "query_string", + values: ["puggles", "pugs"], + }, + { + type: "exact", + metadata: "user_country", + values: ["us"], + }, + ], + actions: { + ids: ["id1", "id2"], + }, + }, + { + rule_id: "rule2", + type: "exclude", + criteria: [ + { + type: "contains", + metadata: "query_string", + values: ["beagles"], + }, + ], + actions: { + docs: [ + { + _index: "my-index-000001", + _id: "id3", + }, + { + _index: "my-index-000002", + _id: "id4", + }, + ], + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc b/docs/doc_examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc new file mode 100644 index 000000000..f3f9d875c --- /dev/null +++ b/docs/doc_examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_query_rules/my-ruleset", +}); +console.log(response); +---- diff --git a/docs/doc_examples/5ffe6fd303400e8678fa1ead291e237f.asciidoc b/docs/doc_examples/5ffe6fd303400e8678fa1ead291e237f.asciidoc new file mode 100644 index 000000000..064493f98 --- /dev/null +++ b/docs/doc_examples/5ffe6fd303400e8678fa1ead291e237f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/600d33c80f8872dda85c87ed41da95fd.asciidoc b/docs/doc_examples/600d33c80f8872dda85c87ed41da95fd.asciidoc new file mode 100644 index 000000000..f2a3f5ba3 --- /dev/null +++ b/docs/doc_examples/600d33c80f8872dda85c87ed41da95fd.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "azure-ai-studio-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "azure_ai_studio_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/6013ed65d2058da5ce704b47a504b60a.asciidoc b/docs/doc_examples/6013ed65d2058da5ce704b47a504b60a.asciidoc new file mode 100644 index 000000000..90a5764ce --- /dev/null +++ b/docs/doc_examples/6013ed65d2058da5ce704b47a504b60a.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "test", + refresh: "true", + operations: [ + { + index: {}, + }, + { + s: 1, + m: 3.1415, + }, + { + index: {}, + }, + { + s: 2, + m: 1, + }, + { + index: {}, + }, + { + s: 3, + m: 2.71828, + }, + ], +}); +console.log(response); + +const response1 = await client.search({ + index: "test", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + s: "desc", + }, + size: 3, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc b/docs/doc_examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc new file mode 100644 index 000000000..a5afd1dfd --- /dev/null +++ b/docs/doc_examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getServiceCredentials({ + namespace: "elastic", + service: "fleet-server", +}); +console.log(response); +---- diff --git a/docs/doc_examples/60299454aa19fec15a604a0dd06fe522.asciidoc b/docs/doc_examples/60299454aa19fec15a604a0dd06fe522.asciidoc new file mode 100644 index 000000000..471a73692 --- /dev/null +++ b/docs/doc_examples/60299454aa19fec15a604a0dd06fe522.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + include_defaults: "true", + filter_path: "*.cluster.routing.allocation.disk.watermark.high*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/602e04051c092cf77de2f75a563661b8.asciidoc b/docs/doc_examples/602e04051c092cf77de2f75a563661b8.asciidoc new file mode 100644 index 000000000..20697b82b --- /dev/null +++ b/docs/doc_examples/602e04051c092cf77de2f75a563661b8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.master({ + help: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/604da59fe41160efa10a846a9dacc07a.asciidoc 
b/docs/doc_examples/604da59fe41160efa10a846a9dacc07a.asciidoc new file mode 100644 index 000000000..162c16e15 --- /dev/null +++ b/docs/doc_examples/604da59fe41160efa10a846a9dacc07a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.getStatus({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6061aadb3b870791278212d1e8f52b39.asciidoc b/docs/doc_examples/6061aadb3b870791278212d1e8f52b39.asciidoc new file mode 100644 index 000000000..525e27c32 --- /dev/null +++ b/docs/doc_examples/6061aadb3b870791278212d1e8f52b39.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getMemoryStats({ + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/608cadc6b8a3f194612b69279ccc96de.asciidoc b/docs/doc_examples/608cadc6b8a3f194612b69279ccc96de.asciidoc new file mode 100644 index 000000000..a3e925620 --- /dev/null +++ b/docs/doc_examples/608cadc6b8a3f194612b69279ccc96de.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "script_score": {\n "query": {\n "bool": {\n "filter": {\n "range": {\n "{{field}}": {\n "{{operator}}": {{value}}\n }\n }\n }\n }\n },\n "script": {\n "source": "cosineSimilarity({{#toJson}}query_vector{{/toJson}}, \'{{dense_vector_field}}\') + 1.0"\n }\n }\n }\n }\n ', + params: { + field: "price", + operator: "gte", + value: 1000, + dense_vector_field: "product-vector", + query_vector: [], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/609260ad1d5998be2ca09ff1fe237efa.asciidoc b/docs/doc_examples/609260ad1d5998be2ca09ff1fe237efa.asciidoc deleted file mode 100644 index f3aa65766..000000000 --- a/docs/doc_examples/609260ad1d5998be2ca09ff1fe237efa.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.getMapping({ - index: 'my-index' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc b/docs/doc_examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc new file mode 100644 index 000000000..7dbdf3f08 --- /dev/null +++ b/docs/doc_examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.putSynonymRule({ + set_id: "my-synonyms-set", + rule_id: "test-1", + synonyms: "hello => hi => howdy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/60a9aa5dcde9023901f6ff27231a10c4.asciidoc b/docs/doc_examples/60a9aa5dcde9023901f6ff27231a10c4.asciidoc new file mode 100644 index 000000000..1b1a9227e --- /dev/null +++ b/docs/doc_examples/60a9aa5dcde9023901f6ff27231a10c4.asciidoc @@ -0,0 +1,27 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "news", + query: { + match: { + content: "madrid", + }, + }, + aggs: { + tags: { + significant_text: { + field: "content", + background_filter: { + term: { + content: "spain", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/60b0fc1b6ae418621ff1b31591fa1fce.asciidoc b/docs/doc_examples/60b0fc1b6ae418621ff1b31591fa1fce.asciidoc new file mode 100644 index 000000000..fbf950f93 --- /dev/null +++ b/docs/doc_examples/60b0fc1b6ae418621ff1b31591fa1fce.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.deleteWatch({ + id: "cluster_health_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/60cab62af1540db2ad3b696b0ee1d7a8.asciidoc b/docs/doc_examples/60cab62af1540db2ad3b696b0ee1d7a8.asciidoc new file mode 100644 index 000000000..6f4065839 --- /dev/null +++ b/docs/doc_examples/60cab62af1540db2ad3b696b0ee1d7a8.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "queries", + query: { + percolate: { + field: "query", + document: { + body: "fox jumps over the lazy dog", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc b/docs/doc_examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc new file mode 100644 index 000000000..627606897 --- /dev/null +++ b/docs/doc_examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "breaker", +}); +console.log(response); +---- diff --git a/docs/doc_examples/60ee33f3acfdd0fe6f288ac77312c780.asciidoc b/docs/doc_examples/60ee33f3acfdd0fe6f288ac77312c780.asciidoc deleted file mode 100644 index 0bb1ce32a..000000000 --- a/docs/doc_examples/60ee33f3acfdd0fe6f288ac77312c780.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'title' - ], - query: 'this that thus', - minimum_should_match: 2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/60f889fbed5df3185444f7015b48ed76.asciidoc b/docs/doc_examples/60f889fbed5df3185444f7015b48ed76.asciidoc new file mode 100644 index 000000000..5f4a5a357 --- /dev/null +++ b/docs/doc_examples/60f889fbed5df3185444f7015b48ed76.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/610f629d0486a64546d62402a0a5e00f.asciidoc b/docs/doc_examples/610f629d0486a64546d62402a0a5e00f.asciidoc new file mode 100644 index 000000000..d728d2ac9 --- /dev/null +++ 
b/docs/doc_examples/610f629d0486a64546d62402a0a5e00f.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + query_string: { + query: "kimchy\\!", + fields: ["user.id"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/612c2e975f833de9815651135735eae5.asciidoc b/docs/doc_examples/612c2e975f833de9815651135735eae5.asciidoc new file mode 100644 index 000000000..0dc50cad8 --- /dev/null +++ b/docs/doc_examples/612c2e975f833de9815651135735eae5.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ + nodes: "nodeId1,nodeId2", + actions: "*reindex", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6138d6919f3cbaaf61e1092f817d295c.asciidoc b/docs/doc_examples/6138d6919f3cbaaf61e1092f817d295c.asciidoc deleted file mode 100644 index 10fbf5e19..000000000 --- a/docs/doc_examples/6138d6919f3cbaaf61e1092f817d295c.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'this is a test', - operator: 'and' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/615dc36f0978c676624fb7d1144b4899.asciidoc b/docs/doc_examples/615dc36f0978c676624fb7d1144b4899.asciidoc new file mode 100644 index 000000000..a1c564297 --- /dev/null +++ b/docs/doc_examples/615dc36f0978c676624fb7d1144b4899.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataLifecycleStats({ + human: "true", + pretty: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/618c9d42284c067891fb57034a4fd834.asciidoc b/docs/doc_examples/618c9d42284c067891fb57034a4fd834.asciidoc new file mode 100644 index 000000000..59be85f0f --- /dev/null +++ b/docs/doc_examples/618c9d42284c067891fb57034a4fd834.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.startJob({ + id: "sensor", +}); +console.log(response); +---- diff --git a/docs/doc_examples/618d5f3d35921d8cb7e9ccfbe9a4c3e3.asciidoc b/docs/doc_examples/618d5f3d35921d8cb7e9ccfbe9a4c3e3.asciidoc deleted file mode 100644 index 988069e6d..000000000 --- a/docs/doc_examples/618d5f3d35921d8cb7e9ccfbe9a4c3e3.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - regexp: { - user: { - value: 'k.*y', - flags: 'ALL', - max_determinized_states: 10000, - rewrite: 'constant_score' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/61bf6ac15ae3e22323454a9a2872a2fa.asciidoc b/docs/doc_examples/61bf6ac15ae3e22323454a9a2872a2fa.asciidoc new file mode 100644 index 000000000..7dd5f988e --- /dev/null +++ 
b/docs/doc_examples/61bf6ac15ae3e22323454a9a2872a2fa.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + type_count: { + cardinality: { + field: "type", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/61c49cee90c6aa0eafbdd5cc03936e7d.asciidoc b/docs/doc_examples/61c49cee90c6aa0eafbdd5cc03936e7d.asciidoc new file mode 100644 index 000000000..f7b40662e --- /dev/null +++ b/docs/doc_examples/61c49cee90c6aa0eafbdd5cc03936e7d.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "data", + id: 1, + document: { + count: 5, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/61d6b9503459914c436930c3ae87d454.asciidoc b/docs/doc_examples/61d6b9503459914c436930c3ae87d454.asciidoc new file mode 100644 index 000000000..f6356f14a --- /dev/null +++ b/docs/doc_examples/61d6b9503459914c436930c3ae87d454.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_query_rules", + querystring: { + from: "0", + size: "3", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/61e38e95191f4dde791070c6fce8a092.asciidoc b/docs/doc_examples/61e38e95191f4dde791070c6fce8a092.asciidoc new file mode 100644 index 000000000..f29cf90ad --- /dev/null +++ b/docs/doc_examples/61e38e95191f4dde791070c6fce8a092.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.holt(values, 0.3, 0.1)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/621665fdbd7fc103c09bfeed28b67b1a.asciidoc b/docs/doc_examples/621665fdbd7fc103c09bfeed28b67b1a.asciidoc new file mode 100644 index 000000000..3dfc8f729 --- /dev/null +++ b/docs/doc_examples/621665fdbd7fc103c09bfeed28b67b1a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.count({ + filter_path: "-_shards", +}); +console.log(response); +---- diff --git a/docs/doc_examples/621f4553e24592d40c8cdbbdfaeb027e.asciidoc b/docs/doc_examples/621f4553e24592d40c8cdbbdfaeb027e.asciidoc new file mode 100644 index 000000000..164decad8 --- /dev/null +++ b/docs/doc_examples/621f4553e24592d40c8cdbbdfaeb027e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + knn: { + field: "image-vector", + query_vector: [54, 10, -2], + k: 5, + num_candidates: 50, + filter: { + term: { + "file-type": "png", + }, + }, + }, + 
fields: ["title"], + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6220087321e6d288024a70c6b09bd720.asciidoc b/docs/doc_examples/6220087321e6d288024a70c6b09bd720.asciidoc new file mode 100644 index 000000000..b2213798e --- /dev/null +++ b/docs/doc_examples/6220087321e6d288024a70c6b09bd720.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 4, + refresh: "true", + document: { + query: { + match: { + message: "lazy dog", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6244204213f60edf2f23295f9059f2c9.asciidoc b/docs/doc_examples/6244204213f60edf2f23295f9059f2c9.asciidoc new file mode 100644 index 000000000..8c011d9f8 --- /dev/null +++ b/docs/doc_examples/6244204213f60edf2f23295f9059f2c9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.stats({ + metric: "queued_watches", +}); +console.log(response); +---- diff --git a/docs/doc_examples/624e69dedf42c4877234b87ec1d00068.asciidoc b/docs/doc_examples/624e69dedf42c4877234b87ec1d00068.asciidoc new file mode 100644 index 000000000..80c5388ca --- /dev/null +++ b/docs/doc_examples/624e69dedf42c4877234b87ec1d00068.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getLifecycle({ + policy_id: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/625dc94df1f9affb49a082fd99d41620.asciidoc b/docs/doc_examples/625dc94df1f9affb49a082fd99d41620.asciidoc deleted file mode 100644 index 1e5878a4f..000000000 --- a/docs/doc_examples/625dc94df1f9affb49a082fd99d41620.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - routing: 'kimchy', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/62c311e7ab4de8b79e532929a5069975.asciidoc b/docs/doc_examples/62c311e7ab4de8b79e532929a5069975.asciidoc new file mode 100644 index 000000000..f1fbcf539 --- /dev/null +++ b/docs/doc_examples/62c311e7ab4de8b79e532929a5069975.asciidoc @@ -0,0 +1,83 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + topics: { + type: "rank_features", + }, + negative_reviews: { + type: "rank_features", + positive_score_impact: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + topics: { + politics: 20, + economics: 50.8, + }, + negative_reviews: { + "1star": 10, + "2star": 100, + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + topics: { + politics: 5.2, + sports: 80.1, + }, + negative_reviews: { + "1star": 1, + "2star": 10, + }, + }, 
+}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + rank_feature: { + field: "topics.politics", + }, + }, +}); +console.log(response3); + +const response4 = await client.search({ + index: "my-index-000001", + query: { + rank_feature: { + field: "negative_reviews.1star", + }, + }, +}); +console.log(response4); + +const response5 = await client.search({ + index: "my-index-000001", + query: { + term: { + topics: "economics", + }, + }, +}); +console.log(response5); +---- diff --git a/docs/doc_examples/62ccee6ad356428c2d625742f961ceb7.asciidoc b/docs/doc_examples/62ccee6ad356428c2d625742f961ceb7.asciidoc new file mode 100644 index 000000000..eb4ca18cf --- /dev/null +++ b/docs/doc_examples/62ccee6ad356428c2d625742f961ceb7.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.updateApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", + role_descriptors: {}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc b/docs/doc_examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc new file mode 100644 index 000000000..f26be6d36 --- /dev/null +++ b/docs/doc_examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "idx", + id: 1, + document: { + foo: [ + { + bar: 1, + }, + { + baz: 2, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc b/docs/doc_examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc new file mode 100644 index 000000000..49b4bbe2c --- /dev/null +++ b/docs/doc_examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + username: "myuser", +}); +console.log(response); +---- diff --git a/docs/doc_examples/62f1ec1bb5cc5a9c2efd536a7474f549.asciidoc b/docs/doc_examples/62f1ec1bb5cc5a9c2efd536a7474f549.asciidoc new file mode 100644 index 000000000..e03e20e45 --- /dev/null +++ b/docs/doc_examples/62f1ec1bb5cc5a9c2efd536a7474f549.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "hunspell", + locale: "en_US", + }, + ], + text: "the foxes jumping quickly", +}); +console.log(response); +---- diff --git a/docs/doc_examples/630d127ccedd25a6cff31ea098ac2847.asciidoc b/docs/doc_examples/630d127ccedd25a6cff31ea098ac2847.asciidoc new file mode 100644 index 000000000..e728af9fe --- /dev/null +++ b/docs/doc_examples/630d127ccedd25a6cff31ea098ac2847.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "node_upgrade", + size: 0, + aggs: { + startup_time_ttest: { + t_test: { + a: { + field: "startup_time_before", + filter: { + term: { + group: "A", + }, + }, + }, + b: { + field: "startup_time_before", + filter: 
{ + term: { + group: "B", + }, + }, + }, + type: "heteroscedastic", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6326f5c6fd2a6e6b1aff9a643b94f455.asciidoc b/docs/doc_examples/6326f5c6fd2a6e6b1aff9a643b94f455.asciidoc new file mode 100644 index 000000000..ce4495d70 --- /dev/null +++ b/docs/doc_examples/6326f5c6fd2a6e6b1aff9a643b94f455.asciidoc @@ -0,0 +1,47 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + text: "quick brown fox", + popularity: 1, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + text: "quick fox", + popularity: 5, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + function_score: { + query: { + match: { + text: "quick brown fox", + }, + }, + script_score: { + script: { + lang: "expression", + source: "_score * doc['popularity']", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/6329fb2840a4373ff6d342f2653247cb.asciidoc b/docs/doc_examples/6329fb2840a4373ff6d342f2653247cb.asciidoc new file mode 100644 index 000000000..cbb113a2e --- /dev/null +++ b/docs/doc_examples/6329fb2840a4373ff6d342f2653247cb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "books", +}); +console.log(response); +---- diff --git a/docs/doc_examples/634ecacf14b83c5f0bb8b6273cf6418e.asciidoc b/docs/doc_examples/634ecacf14b83c5f0bb8b6273cf6418e.asciidoc new file mode 100644 index 000000000..039b6cb0b --- /dev/null +++ b/docs/doc_examples/634ecacf14b83c5f0bb8b6273cf6418e.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "website-product-search", + search_application: { + indices: ["website-products"], + template: { + script: { + source: { + query: { + term: { + "{{field_name}}": "{{field_value}}", + }, + }, + aggs: { + color_facet: { + terms: { + field: "color", + size: "{{agg_size}}", + }, + }, + }, + }, + params: { + field_name: "product_name", + field_value: "hello world", + agg_size: 5, + }, + }, + dictionary: { + properties: { + field_name: { + type: "string", + enum: ["name", "color", "description"], + }, + field_value: { + type: "string", + }, + agg_size: { + type: "integer", + minimum: 1, + maximum: 10, + }, + }, + required: ["field_name"], + additionalProperties: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/63521e0089c631d6668c44a0a9d7fdcc.asciidoc b/docs/doc_examples/63521e0089c631d6668c44a0a9d7fdcc.asciidoc new file mode 100644 index 000000000..ffee3ec5d --- /dev/null +++ b/docs/doc_examples/63521e0089c631d6668c44a0a9d7fdcc.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "custom_limit_example", + settings: { + analysis: { + analyzer: { + whitespace_five_token_limit: { + 
tokenizer: "whitespace", + filter: ["five_token_limit"], + }, + }, + filter: { + five_token_limit: { + type: "limit", + max_token_count: 5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6352e846bb83725ae6d853aa64d8697d.asciidoc b/docs/doc_examples/6352e846bb83725ae6d853aa64d8697d.asciidoc new file mode 100644 index 000000000..d5b4d57bf --- /dev/null +++ b/docs/doc_examples/6352e846bb83725ae6d853aa64d8697d.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "12km", + "pin.location": { + lat: 40, + lon: -70, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6365312d470426cab1b77e9ffde49170.asciidoc b/docs/doc_examples/6365312d470426cab1b77e9ffde49170.asciidoc new file mode 100644 index 000000000..b8033119e --- /dev/null +++ b/docs/doc_examples/6365312d470426cab1b77e9ffde49170.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "click_role", + indices: [ + { + names: ["events-*"], + privileges: ["read"], + query: '{"match": {"category": "click"}}', + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/636ee2066450605247ec1f68d04b8ee4.asciidoc b/docs/doc_examples/636ee2066450605247ec1f68d04b8ee4.asciidoc new file mode 100644 index 000000000..91d90ea7a --- /dev/null +++ b/docs/doc_examples/636ee2066450605247ec1f68d04b8ee4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "http.clientip": "40.135.0.0", + }, + }, + fields: ["*"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc b/docs/doc_examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc new file mode 100644 index 000000000..d8ded8387 --- /dev/null +++ b/docs/doc_examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteCalendar({ + calendar_id: "planned-outages", +}); +console.log(response); +---- diff --git a/docs/doc_examples/63a53fcb0717ae9033a679cbfc932851.asciidoc b/docs/doc_examples/63a53fcb0717ae9033a679cbfc932851.asciidoc new file mode 100644 index 000000000..742fed240 --- /dev/null +++ b/docs/doc_examples/63a53fcb0717ae9033a679cbfc932851.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "completion", + inference_id: "alibabacloud_ai_search_completion", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + api_key: "{{API_KEY}}", + service_id: "ops-qwen-turbo", + workspace: "default", + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc b/docs/doc_examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc new file mode 100644 index 000000000..041c60f0a --- /dev/null +++ b/docs/doc_examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkPutRole({ + roles: { + my_admin_role: { + cluster: ["all"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["all"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, + }, + my_user_role: { + cluster: ["all"], + indices: [ + { + names: ["index1"], + privileges: ["read"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/63cc960215ae83b359c12df3c0993bfa.asciidoc b/docs/doc_examples/63cc960215ae83b359c12df3c0993bfa.asciidoc new file mode 100644 index 000000000..3ad9daa12 --- /dev/null +++ b/docs/doc_examples/63cc960215ae83b359c12df3c0993bfa.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + number_of_shards: 3, + number_of_replicas: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/63e20883732ec30b5400046be2efb0f1.asciidoc b/docs/doc_examples/63e20883732ec30b5400046be2efb0f1.asciidoc new file mode 100644 index 000000000..57d1e690f --- /dev/null +++ b/docs/doc_examples/63e20883732ec30b5400046be2efb0f1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.flush({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/63ecdab34940af053acc409164914c32.asciidoc b/docs/doc_examples/63ecdab34940af053acc409164914c32.asciidoc new file mode 100644 index 000000000..b19b74623 --- /dev/null +++ b/docs/doc_examples/63ecdab34940af053acc409164914c32.asciidoc @@ -0,0 +1,81 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text: { + type: "text", + analyzer: "standard", + }, + impact: { + type: "sparse_vector", + }, + positive: { + type: "sparse_vector", + }, + negative: { + type: "sparse_vector", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + document: { + text: "I had some terribly delicious carrots.", + impact: [ + { + I: 0.55, + had: 0.4, + some: 0.28, + terribly: 0.01, + delicious: 1.2, + carrots: 0.8, + }, + { + I: 0.54, + had: 0.4, + some: 0.28, + terribly: 2.01, + delicious: 0.02, + carrots: 0.4, + }, + ], + positive: { + 
I: 0.55, + had: 0.4, + some: 0.28, + terribly: 0.01, + delicious: 1.2, + carrots: 0.8, + }, + negative: { + I: 0.54, + had: 0.4, + some: 0.28, + terribly: 2.01, + delicious: 0.02, + carrots: 0.4, + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + term: { + impact: { + value: "delicious", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc b/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc new file mode 100644 index 000000000..feecc4a39 --- /dev/null +++ b/docs/doc_examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.lastSync({ + connector_id: "my-connector", + last_access_control_sync_error: "Houston, we have a problem!", + last_access_control_sync_scheduled_at: "2023-11-09T15:13:08.231Z", + last_access_control_sync_status: "pending", + last_deleted_document_count: 42, + last_incremental_sync_scheduled_at: "2023-11-09T15:13:08.231Z", + last_indexed_document_count: 42, + last_sync_error: "Houston, we have a problem!", + last_sync_scheduled_at: "2024-11-09T15:13:08.231Z", + last_sync_status: "completed", + last_synced: "2024-11-09T15:13:08.231Z", +}); +console.log(response); +---- diff --git a/docs/doc_examples/640a89d0b39630269433425ff476faf3.asciidoc b/docs/doc_examples/640a89d0b39630269433425ff476faf3.asciidoc new file mode 100644 index 000000000..a64753ac7 --- /dev/null +++ b/docs/doc_examples/640a89d0b39630269433425ff476faf3.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "archived.*": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/640da6dd719a34975b5627dfa5fcdd55.asciidoc b/docs/doc_examples/640da6dd719a34975b5627dfa5fcdd55.asciidoc new file mode 100644 index 000000000..a61d31c9e --- /dev/null +++ b/docs/doc_examples/640da6dd719a34975b5627dfa5fcdd55.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.monitoring.collection.enabled": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc b/docs/doc_examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc new file mode 100644 index 000000000..54d88b41e --- /dev/null +++ b/docs/doc_examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.stats({ + include_remotes: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/640e4f2c2d29f9851320a70927bd7a6c.asciidoc b/docs/doc_examples/640e4f2c2d29f9851320a70927bd7a6c.asciidoc new file mode 100644 index 000000000..e3b853969 --- /dev/null +++ b/docs/doc_examples/640e4f2c2d29f9851320a70927bd7a6c.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- 
+const response = await client.cluster.putSettings({ + persistent: { + "indices.lifecycle.poll_interval": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/641009f2147e1ca56215c701f45c970b.asciidoc b/docs/doc_examples/641009f2147e1ca56215c701f45c970b.asciidoc new file mode 100644 index 000000000..a07dc7981 --- /dev/null +++ b/docs/doc_examples/641009f2147e1ca56215c701f45c970b.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "tiles-in-bounds": { + geotile_grid: { + field: "location", + precision: 22, + bounds: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc b/docs/doc_examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc new file mode 100644 index 000000000..c6274ed1e --- /dev/null +++ b/docs/doc_examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.segments(); +console.log(response); +---- diff --git a/docs/doc_examples/641f75862c70e25e79d249d9e0a79f03.asciidoc b/docs/doc_examples/641f75862c70e25e79d249d9e0a79f03.asciidoc new file mode 100644 index 000000000..002fbb288 --- /dev/null +++ b/docs/doc_examples/641f75862c70e25e79d249d9e0a79f03.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + nested: { + path: "obj1", + query: { + bool: { + must: [ + { + match: { + "obj1.name": "blue", + }, + }, + { + range: { + "obj1.count": { + gt: 5, + }, + }, + }, + ], + }, + }, + score_mode: "avg", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/642161d70dacf7d153767d37d3726838.asciidoc b/docs/doc_examples/642161d70dacf7d153767d37d3726838.asciidoc new file mode 100644 index 000000000..d3906eec1 --- /dev/null +++ b/docs/doc_examples/642161d70dacf7d153767d37d3726838.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getRollupIndexCaps({ + index: "*_rollup", +}); +console.log(response); +---- diff --git a/docs/doc_examples/642c0c1c76e9bf226cd216ebae9ab958.asciidoc b/docs/doc_examples/642c0c1c76e9bf226cd216ebae9ab958.asciidoc new file mode 100644 index 000000000..832c25a7a --- /dev/null +++ b/docs/doc_examples/642c0c1c76e9bf226cd216ebae9ab958.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "keep_words_example", + settings: { + analysis: { + analyzer: { + standard_keep_word_array: { + tokenizer: "standard", + filter: ["keep_word_array"], + }, + standard_keep_word_file: { + tokenizer: "standard", + filter: ["keep_word_file"], + }, + }, + filter: { + keep_word_array: { + type: "keep", + keep_words: ["one", "two", "three"], + }, + keep_word_file: { + type: "keep", + keep_words_path: 
"analysis/example_word_list.txt", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/643b9506d1129d5215f9a1bb0b509aba.asciidoc b/docs/doc_examples/643b9506d1129d5215f9a1bb0b509aba.asciidoc new file mode 100644 index 000000000..1d7d9df89 --- /dev/null +++ b/docs/doc_examples/643b9506d1129d5215f9a1bb0b509aba.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + full_name: { + path_match: "name.*", + path_unmatch: "*.middle", + mapping: { + type: "text", + copy_to: "full_name", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + name: { + first: "John", + middle: "Winston", + last: "Lennon", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc b/docs/doc_examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc new file mode 100644 index 000000000..0bc357bb7 --- /dev/null +++ b/docs/doc_examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-index-template", + index_patterns: ["logs-*"], + data_stream: {}, + template: { + settings: { + "index.mode": "logsdb", + }, + }, + priority: 101, +}); +console.log(response); +---- diff --git a/docs/doc_examples/645136747d37368a14ab34de8bd046c6.asciidoc b/docs/doc_examples/645136747d37368a14ab34de8bd046c6.asciidoc deleted file mode 100644 index ef507a7b1..000000000 --- a/docs/doc_examples/645136747d37368a14ab34de8bd046c6.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - date: { - type: 'date' - } - } - } - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'my_index', - id: '1', - body: { - date: '2015-01-01' - } -}) -console.log(response1) - -const response2 = await client.index({ - index: 'my_index', - id: '2', - body: { - date: '2015-01-01T12:10:30Z' - } -}) -console.log(response2) - -const response3 = await client.index({ - index: 'my_index', - id: '3', - body: { - date: 1420070400001 - } -}) -console.log(response3) - -const response4 = await client.search({ - index: 'my_index', - body: { - sort: { - date: 'asc' - } - } -}) -console.log(response4) ----- - diff --git a/docs/doc_examples/645433e8e479e5d71c100f66dd2de5d0.asciidoc b/docs/doc_examples/645433e8e479e5d71c100f66dd2de5d0.asciidoc new file mode 100644 index 000000000..ed93f04b5 --- /dev/null +++ b/docs/doc_examples/645433e8e479e5d71c100f66dd2de5d0.asciidoc @@ -0,0 +1,534 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-data-stream", + refresh: "true", + pipeline: "my-timestamp-pipeline", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:49:00Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + 
pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 91153, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 463314616, + }, + usage: { + bytes: 307007078, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 585236, + }, + rss: { + bytes: 102728, + }, + pagefaults: 120901, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:45:50Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 124501, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 982546514, + }, + usage: { + bytes: 360035574, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1339884, + }, + rss: { + bytes: 381174, + }, + pagefaults: 178473, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:50Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 38907, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 862723768, + }, + usage: { + bytes: 379572388, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 431227, + }, + rss: { + bytes: 386580, + }, + pagefaults: 233166, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:40Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 86706, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 567160996, + }, + usage: { + bytes: 103266017, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1724908, + }, + rss: { + bytes: 105431, + }, + pagefaults: 233166, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:44:00Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 150069, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 639054643, + }, + usage: { + bytes: 265142477, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1786511, + }, + rss: { + bytes: 189235, + }, + pagefaults: 138172, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + 
}, + { + "@timestamp": "2022-06-21T15:42:40Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 82260, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 854735585, + }, + usage: { + bytes: 309798052, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 924058, + }, + rss: { + bytes: 110838, + }, + pagefaults: 259073, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:42:10Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 153404, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 279586406, + }, + usage: { + bytes: 214904955, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1047265, + }, + rss: { + bytes: 91914, + }, + pagefaults: 302252, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:40:20Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 125613, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 822782853, + }, + usage: { + bytes: 100475044, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 2109932, + }, + rss: { + bytes: 278446, + }, + pagefaults: 74843, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:40:10Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 100046, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 567160996, + }, + usage: { + bytes: 362826547, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 1986724, + }, + rss: { + bytes: 402801, + }, + pagefaults: 296495, + majorpagefaults: 0, + }, + start_time: "2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + { + create: {}, + }, + { + "@timestamp": "2022-06-21T15:38:30Z", + kubernetes: { + host: "gke-apps-0", + node: "gke-apps-0-0", + pod: "gke-apps-0-0-0", + container: { + cpu: { + usage: { + nanocores: 40018, + core: { + ns: 12828317850, + }, + node: { + pct: 0.0000277905, + }, + limit: { + pct: 0.0000277905, + }, + }, + }, + memory: { + available: { + bytes: 1062428344, + }, + usage: { + bytes: 265142477, + node: { + pct: 0.01770037710617187, + }, + limit: { + pct: 0.00009923134671484496, + }, + }, + workingset: { + bytes: 2294743, + }, + rss: { + bytes: 340623, + }, + pagefaults: 224530, + majorpagefaults: 0, + }, + start_time: 
"2021-03-30T07:59:06Z", + name: "container-name-44", + }, + namespace: "namespace26", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/645796e8047967ca4a7635a22a876f4c.asciidoc b/docs/doc_examples/645796e8047967ca4a7635a22a876f4c.asciidoc deleted file mode 100644 index 76b389764..000000000 --- a/docs/doc_examples/645796e8047967ca4a7635a22a876f4c.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - size: 0, - aggs: { - group_by_state: { - terms: { - field: 'state.keyword', - order: { - average_balance: 'desc' - } - }, - aggs: { - average_balance: { - avg: { - field: 'balance' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/645c4c6e209719d3a4d25b1a629cb23b.asciidoc b/docs/doc_examples/645c4c6e209719d3a4d25b1a629cb23b.asciidoc deleted file mode 100644 index 89ed665b3..000000000 --- a/docs/doc_examples/645c4c6e209719d3a4d25b1a629cb23b.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - random_score: { - seed: 10, - field: '_seq_no' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/64622409407316d2d47094e692d9b516.asciidoc b/docs/doc_examples/64622409407316d2d47094e692d9b516.asciidoc new file mode 100644 index 000000000..19d1e10d5 --- /dev/null +++ b/docs/doc_examples/64622409407316d2d47094e692d9b516.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "student_performance_mathematics_reg", + query: { + term: { + "ml.is_training": { + value: false, + }, + }, + }, + evaluation: { + regression: { + actual_field: "G3", + predicted_field: "ml.G3_prediction", + metrics: { + r_squared: {}, + mse: {}, + msle: {}, + huber: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6464124d1677f4552ddddd95a340ca3a.asciidoc b/docs/doc_examples/6464124d1677f4552ddddd95a340ca3a.asciidoc new file mode 100644 index 000000000..81f43eb4f --- /dev/null +++ b/docs/doc_examples/6464124d1677f4552ddddd95a340ca3a.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "library", + refresh: "true", + document: { + title: "Book #1", + rating: 200.1, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "library", + refresh: "true", + document: { + title: "Book #2", + rating: 1.7, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "library", + refresh: "true", + document: { + title: "Book #3", + rating: 0.1, + }, +}); +console.log(response2); + +const response3 = await client.search({ + filter_path: "hits.hits._source", + _source: "title", + sort: "rating:desc", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/646d71869f1a18c5bede7759559bfc47.asciidoc b/docs/doc_examples/646d71869f1a18c5bede7759559bfc47.asciidoc new file mode 100644 index 000000000..9f9b95638 
--- /dev/null +++ b/docs/doc_examples/646d71869f1a18c5bede7759559bfc47.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "_all", + fields: "message", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6490d89a4e43cac5e6b9bc19840d5478.asciidoc b/docs/doc_examples/6490d89a4e43cac5e6b9bc19840d5478.asciidoc new file mode 100644 index 000000000..6406a2c30 --- /dev/null +++ b/docs/doc_examples/6490d89a4e43cac5e6b9bc19840d5478.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "fingerprint", + text: "Yes yes, Gödel said this sentence is consistent and.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/64a6fb4bcb8cfea139a0e5d3765c063a.asciidoc b/docs/doc_examples/64a6fb4bcb8cfea139a0e5d3765c063a.asciidoc new file mode 100644 index 000000000..64d437f6f --- /dev/null +++ b/docs/doc_examples/64a6fb4bcb8cfea139a0e5d3765c063a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.translate({ + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/64a79861225553799b26e118d7851dcc.asciidoc b/docs/doc_examples/64a79861225553799b26e118d7851dcc.asciidoc new file mode 100644 index 000000000..b093d850f --- /dev/null +++ b/docs/doc_examples/64a79861225553799b26e118d7851dcc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.explainLifecycle({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/64aff98cf477555e7411714c17006572.asciidoc b/docs/doc_examples/64aff98cf477555e7411714c17006572.asciidoc new file mode 100644 index 000000000..906360d56 --- /dev/null +++ b/docs/doc_examples/64aff98cf477555e7411714c17006572.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + range: { + timestamp: { + gte: "now-1d/d", + lte: "now/d", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/64b9baa6d7556b960b29698f3383aa31.asciidoc b/docs/doc_examples/64b9baa6d7556b960b29698f3383aa31.asciidoc deleted file mode 100644 index 4c2382017..000000000 --- a/docs/doc_examples/64b9baa6d7556b960b29698f3383aa31.asciidoc +++ /dev/null @@ -1,27 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - remote: { - host: 'http://otherhost:9200/' - }, - index: 'source', - size: 10, - query: { - match: { - test: 'data' - } - } - }, - dest: { - index: 'dest' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc b/docs/doc_examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc new file mode 100644 index 
000000000..2f0e298e1 --- /dev/null +++ b/docs/doc_examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.features.getFeatures(); +console.log(response); +---- diff --git a/docs/doc_examples/64c804869ddfbcb9075817d0bbf71b5c.asciidoc b/docs/doc_examples/64c804869ddfbcb9075817d0bbf71b5c.asciidoc new file mode 100644 index 000000000..71c886d2a --- /dev/null +++ b/docs/doc_examples/64c804869ddfbcb9075817d0bbf71b5c.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + elser: true, + query_string: "where is the best mountain climbing?", + elser_fields: [ + { + name: "title", + boost: 1, + }, + { + name: "description", + boost: 1, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/64ca2ccb79a8f4add5b8fe2d3322ae92.asciidoc b/docs/doc_examples/64ca2ccb79a8f4add5b8fe2d3322ae92.asciidoc new file mode 100644 index 000000000..c82166c2e --- /dev/null +++ b/docs/doc_examples/64ca2ccb79a8f4add5b8fe2d3322ae92.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + avg_grade: { + avg: { + field: "grade", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc b/docs/doc_examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc new file mode 100644 index 000000000..b5f8b138e --- /dev/null +++ b/docs/doc_examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + index: "test", + stored_fields: "field1,field2", + docs: [ + { + _id: "1", + }, + { + _id: "2", + stored_fields: ["field3", "field4"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/64ffaa6814ec1ec4f59b8f33b47cffb4.asciidoc b/docs/doc_examples/64ffaa6814ec1ec4f59b8f33b47cffb4.asciidoc new file mode 100644 index 000000000..cac6f7e52 --- /dev/null +++ b/docs/doc_examples/64ffaa6814ec1ec4f59b8f33b47cffb4.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index", + settings: { + "archived.*": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/650a0fb27c66a790c4687267423af1da.asciidoc b/docs/doc_examples/650a0fb27c66a790c4687267423af1da.asciidoc new file mode 100644 index 000000000..a82a5ec0e --- /dev/null +++ b/docs/doc_examples/650a0fb27c66a790c4687267423af1da.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + remove: { + index: "logs-nginx.access-prod", + alias: "logs", + }, + }, + { + add: { + index: "logs-my_app-default", + alias: "logs", + }, + }, + ], 
+}); +console.log(response); +---- diff --git a/docs/doc_examples/6521c3578dc4ad4a6db697700986e78e.asciidoc b/docs/doc_examples/6521c3578dc4ad4a6db697700986e78e.asciidoc new file mode 100644 index 000000000..f0438bf28 --- /dev/null +++ b/docs/doc_examples/6521c3578dc4ad4a6db697700986e78e.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "place", + pretty: "true", + suggest: { + place_suggestion: { + prefix: "tim", + completion: { + field: "suggest", + size: 10, + contexts: { + location: [ + { + lat: 43.6624803, + lon: -79.3863353, + precision: 2, + }, + { + context: { + lat: 43.6624803, + lon: -79.3863353, + }, + boost: 2, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/653c0d0ef146c997ef6bc6450d4f5f94.asciidoc b/docs/doc_examples/653c0d0ef146c997ef6bc6450d4f5f94.asciidoc new file mode 100644 index 000000000..40435f67a --- /dev/null +++ b/docs/doc_examples/653c0d0ef146c997ef6bc6450d4f5f94.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + actors: { + terms: { + field: "actors", + size: 10, + }, + aggs: { + costars: { + terms: { + field: "actors", + size: 5, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/654882f545eca8d7047695f867c63072.asciidoc b/docs/doc_examples/654882f545eca8d7047695f867c63072.asciidoc new file mode 100644 index 000000000..678ccc1fe --- /dev/null +++ b/docs/doc_examples/654882f545eca8d7047695f867c63072.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.stopTransform({ + transform_id: "ecommerce_transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/65578c390837cb4c0fcc77fb17857714.asciidoc b/docs/doc_examples/65578c390837cb4c0fcc77fb17857714.asciidoc new file mode 100644 index 000000000..8030d9270 --- /dev/null +++ b/docs/doc_examples/65578c390837cb4c0fcc77fb17857714.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + max_monthly_sales: { + max_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/657cf67bbc48f3b8c7fa15e275a5ef72.asciidoc b/docs/doc_examples/657cf67bbc48f3b8c7fa15e275a5ef72.asciidoc new file mode 100644 index 000000000..85ccdf9af --- /dev/null +++ b/docs/doc_examples/657cf67bbc48f3b8c7fa15e275a5ef72.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "logs-foo_component1", + template: { + mappings: { + properties: { + "host.name": { + type: "keyword", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/658842bf41e0fcb7969937155946a0ff.asciidoc b/docs/doc_examples/658842bf41e0fcb7969937155946a0ff.asciidoc new file mode 100644 index 000000000..46cdd17c1 --- /dev/null +++ b/docs/doc_examples/658842bf41e0fcb7969937155946a0ff.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "slm-read-only", + cluster: ["read_slm"], + indices: [ + { + names: [".slm-history-*"], + privileges: ["read"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc b/docs/doc_examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc new file mode 100644 index 000000000..3fe957756 --- /dev/null +++ b/docs/doc_examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + sparse_vector: { + field: "ml.tokens", + inference_id: "the inference ID to produce the token weights", + query: "the query string", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/65c671fbecdb5b0d75c13d63f87e36f0.asciidoc b/docs/doc_examples/65c671fbecdb5b0d75c13d63f87e36f0.asciidoc new file mode 100644 index 000000000..5644ea387 --- /dev/null +++ b/docs/doc_examples/65c671fbecdb5b0d75c13d63f87e36f0.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + rings_around_amsterdam: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + ranges: [ + { + to: 100000, + }, + { + from: 100000, + to: 300000, + }, + { + from: 300000, + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc b/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc new file mode 100644 index 000000000..5cbb57477 --- /dev/null +++ b/docs/doc_examples/6606d46685d10377b996b5f20f1229b5.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateIndexName({ + connector_id: "my-connector", + index_name: "data-from-my-google-drive", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc b/docs/doc_examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc new file mode 100644 index 000000000..9856c1923 --- /dev/null +++ b/docs/doc_examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ledger", + size: 0, + query: { + match_all: {}, + }, + aggs: { + profit: { + scripted_metric: { + init_script: "state.transactions = []", + map_script: + "state.transactions.add(doc.type.value == 'sale' ? 
doc.amount.value : -1 * doc.amount.value)", + combine_script: + "double profit = 0; for (t in state.transactions) { profit += t } return profit", + reduce_script: + "double profit = 0; for (a in states) { profit += a } return profit", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc b/docs/doc_examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc new file mode 100644 index 000000000..a1e318f4e --- /dev/null +++ b/docs/doc_examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + h: "ip,port,heapPercent,name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/666c420fe61fa122386da3c356a64943.asciidoc b/docs/doc_examples/666c420fe61fa122386da3c356a64943.asciidoc new file mode 100644 index 000000000..1da863638 --- /dev/null +++ b/docs/doc_examples/666c420fe61fa122386da3c356a64943.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + term: { + user: "kimchy", + }, + }, + sort: { + _script: { + type: "number", + script: { + lang: "painless", + source: "doc['field_name'].value * params.factor", + params: { + factor: 1.1, + }, + }, + order: "asc", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6689aa213884196b47a6f482d4993749.asciidoc b/docs/doc_examples/6689aa213884196b47a6f482d4993749.asciidoc new file mode 100644 index 000000000..437216410 --- /dev/null +++ b/docs/doc_examples/6689aa213884196b47a6f482d4993749.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline-id", + description: "My optional pipeline description", + processors: [ + { + set: { + description: "My optional processor description", + field: "my-keyword-field", + value: "foo", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc b/docs/doc_examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc new file mode 100644 index 000000000..c541fd004 --- /dev/null +++ b/docs/doc_examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: "my-index", + dest: "my-new-index", + create_from: null, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6693f0ffa0de3229b5dedda197810e70.asciidoc b/docs/doc_examples/6693f0ffa0de3229b5dedda197810e70.asciidoc new file mode 100644 index 000000000..6e92e6268 --- /dev/null +++ b/docs/doc_examples/6693f0ffa0de3229b5dedda197810e70.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.get({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + keep_alive: "5d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/669773766b041be768003055ad523038.asciidoc 
b/docs/doc_examples/669773766b041be768003055ad523038.asciidoc new file mode 100644 index 000000000..0934ce2fc --- /dev/null +++ b/docs/doc_examples/669773766b041be768003055ad523038.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: ".ds-my-data-stream-2099.03.08-000002", + id: 2, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6705eca2095ade294548cfb25bf2dd86.asciidoc b/docs/doc_examples/6705eca2095ade294548cfb25bf2dd86.asciidoc new file mode 100644 index 000000000..7b190a4c5 --- /dev/null +++ b/docs/doc_examples/6705eca2095ade294548cfb25bf2dd86.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", + h: "index,shard,prirep,state,node,unassigned.reason", + s: "state", +}); +console.log(response); +---- diff --git a/docs/doc_examples/672d30eb3af573140d966e88b14814f8.asciidoc b/docs/doc_examples/672d30eb3af573140d966e88b14814f8.asciidoc new file mode 100644 index 000000000..02dd6b739 --- /dev/null +++ b/docs/doc_examples/672d30eb3af573140d966e88b14814f8.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index", + id: 1, + pipeline: "monthlyindex", + document: { + date1: "2016-04-25T12:02:01.789Z", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6742a8cd0b7b4c1c325ce2f22faf6cb4.asciidoc b/docs/doc_examples/6742a8cd0b7b4c1c325ce2f22faf6cb4.asciidoc new file mode 100644 index 000000000..7d1c236b7 --- /dev/null +++ b/docs/doc_examples/6742a8cd0b7b4c1c325ce2f22faf6cb4.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "log-messages", + filter_path: "aggregations", + aggs: { + categories: { + categorize_text: { + field: "message", + categorization_filters: ["\\w+\\_\\d{3}"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/674bb755111c6fbaa4c5ac759395c122.asciidoc b/docs/doc_examples/674bb755111c6fbaa4c5ac759395c122.asciidoc new file mode 100644 index 000000000..21120aeb3 --- /dev/null +++ b/docs/doc_examples/674bb755111c6fbaa4c5ac759395c122.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index", + flat_settings: "true", + include_defaults: "true", +}); +console.log(response); + +const response1 = await client.cluster.getSettings({ + flat_settings: "true", + include_defaults: "true", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/67967388db610dcb9d24fb59ede348d8.asciidoc b/docs/doc_examples/67967388db610dcb9d24fb59ede348d8.asciidoc new file mode 100644 index 000000000..577b3ea88 --- /dev/null +++ b/docs/doc_examples/67967388db610dcb9d24fb59ede348d8.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.search({ + index: "sales", + size: 0, + aggs: { + min_price: { + min: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6799d132c1c7ca3970763acde2337ef9.asciidoc b/docs/doc_examples/6799d132c1c7ca3970763acde2337ef9.asciidoc deleted file mode 100644 index e22d0dadc..000000000 --- a/docs/doc_examples/6799d132c1c7ca3970763acde2337ef9.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test*', - alias: 'all_test_indices' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/67a1f31cf60773a2378c2c30723c4b96.asciidoc b/docs/doc_examples/67a1f31cf60773a2378c2c30723c4b96.asciidoc new file mode 100644 index 000000000..a64330970 --- /dev/null +++ b/docs/doc_examples/67a1f31cf60773a2378c2c30723c4b96.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_ranks: { + percentile_ranks: { + field: "load_time", + values: [500, 600], + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/67a490d749a0c3bb16a266663423893d.asciidoc b/docs/doc_examples/67a490d749a0c3bb16a266663423893d.asciidoc new file mode 100644 index 000000000..f650cc07a --- /dev/null +++ b/docs/doc_examples/67a490d749a0c3bb16a266663423893d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.deleteWatch({ + id: "log_error_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/67a55ac3aaee09f4aeeb7d2763da3335.asciidoc b/docs/doc_examples/67a55ac3aaee09f4aeeb7d2763da3335.asciidoc new file mode 100644 index 000000000..13c543473 --- /dev/null +++ b/docs/doc_examples/67a55ac3aaee09f4aeeb7d2763da3335.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "places", + mappings: { + properties: { + geometry: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "places", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + name: "NEMO Science Museum", + geometry: "POINT(4.912350 52.374081)", + }, + { + index: { + _id: 2, + }, + }, + { + name: "Sportpark De Weeren", + geometry: { + type: "Polygon", + coordinates: [ + [ + [4.965305328369141, 52.39347642069457], + [4.966979026794433, 52.391721758934835], + [4.969425201416015, 52.39238958618537], + [4.967944622039794, 52.39420969150824], + [4.965305328369141, 52.39347642069457], + ], + ], + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "places", + size: 0, + aggs: { + viewport: { + geo_bounds: { + field: "geometry", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc b/docs/doc_examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc new file mode 100644 index 000000000..3dc53206e --- /dev/null +++ 
b/docs/doc_examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_snapshot/my_repository/_analyze", + querystring: { + blob_count: "10", + max_blob_size: "1mb", + timeout: "120s", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc b/docs/doc_examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc new file mode 100644 index 000000000..09b647d22 --- /dev/null +++ b/docs/doc_examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQueryDelete({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/67bab07fda27ef77e3bc948211051a33.asciidoc b/docs/doc_examples/67bab07fda27ef77e3bc948211051a33.asciidoc new file mode 100644 index 000000000..5bc95aa00 --- /dev/null +++ b/docs/doc_examples/67bab07fda27ef77e3bc948211051a33.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool({ + thread_pool_patterns: "write,search", + v: "true", + s: "n,nn", + h: "n,nn,q,a,r,c", +}); +console.log(response); +---- diff --git a/docs/doc_examples/67bba546d835bca8f31df13e3587c348.asciidoc b/docs/doc_examples/67bba546d835bca8f31df13e3587c348.asciidoc deleted file mode 100644 index 9e6ddc036..000000000 --- a/docs/doc_examples/67bba546d835bca8f31df13e3587c348.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'test', - id: '1' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/67c3808751223eef69a57e6fd02ddf4f.asciidoc b/docs/doc_examples/67c3808751223eef69a57e6fd02ddf4f.asciidoc new file mode 100644 index 000000000..37ef597f7 --- /dev/null +++ b/docs/doc_examples/67c3808751223eef69a57e6fd02ddf4f.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + more_like_this: { + fields: ["title", "description"], + like: [ + { + _index: "imdb", + _id: "1", + }, + { + _index: "imdb", + _id: "2", + }, + "and potentially some more text here as well", + ], + min_term_freq: 1, + max_query_terms: 12, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/67ceac4bf2d9ac7cc500390544cdcb41.asciidoc b/docs/doc_examples/67ceac4bf2d9ac7cc500390544cdcb41.asciidoc deleted file mode 100644 index b847c0c06..000000000 --- a/docs/doc_examples/67ceac4bf2d9ac7cc500390544cdcb41.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - range: { - timestamp: { - gte: 'now-1d/d', - lt: 'now/d' - } - } - } - } -}) -console.log(response) ----- - diff --git 
a/docs/doc_examples/67ffa135c50c43d6788636c88078c7d1.asciidoc b/docs/doc_examples/67ffa135c50c43d6788636c88078c7d1.asciidoc new file mode 100644 index 000000000..16ae9d9b2 --- /dev/null +++ b/docs/doc_examples/67ffa135c50c43d6788636c88078c7d1.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + id: "my-pipeline-id", + docs: [ + { + _index: "index", + _id: "id", + _source: { + foo: "bar", + }, + }, + { + _index: "index", + _id: "id", + _source: { + foo: "rab", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/682336e5232c9ad3d866cb203d1c58c1.asciidoc b/docs/doc_examples/682336e5232c9ad3d866cb203d1c58c1.asciidoc new file mode 100644 index 000000000..6da7fb4fa --- /dev/null +++ b/docs/doc_examples/682336e5232c9ad3d866cb203d1c58c1.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "azure-openai-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1536, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6843d859e2965d17cad4f033c81db83f.asciidoc b/docs/doc_examples/6843d859e2965d17cad4f033c81db83f.asciidoc new file mode 100644 index 000000000..440cd6741 --- /dev/null +++ b/docs/doc_examples/6843d859e2965d17cad4f033c81db83f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-data-stream-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + settings: { + "sort.field": ["@timestamp"], + "sort.order": ["desc"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6856f7c6a732ab55ca71c1ee2ec2bbad.asciidoc b/docs/doc_examples/6856f7c6a732ab55ca71c1ee2ec2bbad.asciidoc new file mode 100644 index 000000000..5184e4c0a --- /dev/null +++ b/docs/doc_examples/6856f7c6a732ab55ca71c1ee2ec2bbad.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + latency_histo: { + type: "histogram", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + filter_path: "aggregations", + aggs: { + max_latency: { + max: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response3); +---- diff --git 
a/docs/doc_examples/6859530dd9d85e59bd33a53ec96a3836.asciidoc b/docs/doc_examples/6859530dd9d85e59bd33a53ec96a3836.asciidoc new file mode 100644 index 000000000..0558f50d4 --- /dev/null +++ b/docs/doc_examples/6859530dd9d85e59bd33a53ec96a3836.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "users", + id: 1, + refresh: "wait_for", + document: { + email: "mardy.brown@asciidocsmith.com", + first_name: "Mardy", + last_name: "Brown", + city: "New Orleans", + county: "Orleans", + state: "LA", + zip: 70116, + web: "mardy.asciidocsmith.com", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/686bc640b877de845c46bef372a9866c.asciidoc b/docs/doc_examples/686bc640b877de845c46bef372a9866c.asciidoc new file mode 100644 index 000000000..e081be4f1 --- /dev/null +++ b/docs/doc_examples/686bc640b877de845c46bef372a9866c.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "parent_example", + size: 0, + aggs: { + "top-names": { + terms: { + field: "owner.display_name.keyword", + size: 10, + }, + aggs: { + "to-questions": { + parent: { + type: "answer", + }, + aggs: { + "top-tags": { + terms: { + field: "tags.keyword", + size: 10, + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/68721288dc9ad8aa1b55099b4d303051.asciidoc b/docs/doc_examples/68721288dc9ad8aa1b55099b4d303051.asciidoc index 7dc6e69b6..6b3caa8d2 100644 --- a/docs/doc_examples/68721288dc9ad8aa1b55099b4d303051.asciidoc +++ b/docs/doc_examples/68721288dc9ad8aa1b55099b4d303051.asciidoc @@ -4,19 +4,13 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'quick brown f', - type: 'bool_prefix', - fields: [ - 'subject', - 'message' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "quick brown f", + type: "bool_prefix", + fields: ["subject", "message"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/68738b4fd0dda177022be45be95b4c84.asciidoc b/docs/doc_examples/68738b4fd0dda177022be45be95b4c84.asciidoc index f98801129..950fc0229 100644 --- a/docs/doc_examples/68738b4fd0dda177022be45be95b4c84.asciidoc +++ b/docs/doc_examples/68738b4fd0dda177022be45be95b4c84.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.reindexRethrottle({ - task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619', - requests_per_second: '-1' -}) -console.log(response) + task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", + requests_per_second: "-1", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/6884454f57c3a41059037ea762f48d77.asciidoc b/docs/doc_examples/6884454f57c3a41059037ea762f48d77.asciidoc new file mode 100644 index 000000000..c870eedf0 --- /dev/null +++ b/docs/doc_examples/6884454f57c3a41059037ea762f48d77.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "standard", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/68a891f609ca3a379d2d64e4914f3067.asciidoc 
b/docs/doc_examples/68a891f609ca3a379d2d64e4914f3067.asciidoc new file mode 100644 index 000000000..324247ffd --- /dev/null +++ b/docs/doc_examples/68a891f609ca3a379d2d64e4914f3067.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["kstem"], + text: "the foxes jumping quickly", +}); +console.log(response); +---- diff --git a/docs/doc_examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc b/docs/doc_examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc new file mode 100644 index 000000000..ed5756abf --- /dev/null +++ b/docs/doc_examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info({ + node_id: "plugins", +}); +console.log(response); +---- diff --git a/docs/doc_examples/68cb8a452e780ca78b0cb761be3629af.asciidoc b/docs/doc_examples/68cb8a452e780ca78b0cb761be3629af.asciidoc new file mode 100644 index 000000000..df5f75e1f --- /dev/null +++ b/docs/doc_examples/68cb8a452e780ca78b0cb761be3629af.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + stored_fields: "_none_", + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc b/docs/doc_examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc new file mode 100644 index 000000000..021aa7e19 --- /dev/null +++ b/docs/doc_examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc @@ -0,0 +1,53 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "destination_template", + template: { + settings: { + index: { + number_of_replicas: 0, + number_of_shards: 4, + mode: "time_series", + routing_path: ["metricset"], + time_series: { + end_time: "2023-09-01T14:00:00.000Z", + start_time: "2023-09-01T06:00:00.000Z", + }, + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + metricset: { + type: "keyword", + time_series_dimension: true, + }, + k8s: { + properties: { + tx: { + type: "long", + }, + rx: { + type: "long", + }, + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: 2, + index_patterns: ["k9s*"], + composed_of: ["destination_template"], + data_stream: {}, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/691fe20d467324ed43a36fd15852c492.asciidoc b/docs/doc_examples/691fe20d467324ed43a36fd15852c492.asciidoc new file mode 100644 index 000000000..e97c3e203 --- /dev/null +++ b/docs/doc_examples/691fe20d467324ed43a36fd15852c492.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.follow({ + index: "kibana_sample_data_ecommerce", + wait_for_active_shards: 1, + remote_cluster: "clusterB", + leader_index: "kibana_sample_data_ecommerce2", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/692606cc6d6462becc321d92961a3bac.asciidoc b/docs/doc_examples/692606cc6d6462becc321d92961a3bac.asciidoc new file mode 100644 index 000000000..368ac4df2 --- /dev/null +++ b/docs/doc_examples/692606cc6d6462becc321d92961a3bac.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.textStructure.testGrokPattern({ + grok_pattern: "Hello %{WORD:first_name} %{WORD:last_name}", + text: ["Hello John Doe", "this does not match"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/69541f0bb81ab3797926bb2a00607cda.asciidoc b/docs/doc_examples/69541f0bb81ab3797926bb2a00607cda.asciidoc new file mode 100644 index 000000000..bf62637fc --- /dev/null +++ b/docs/doc_examples/69541f0bb81ab3797926bb2a00607cda.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "my-msmarco-minilm-model", + inference_config: { + service: "elasticsearch", + service_settings: { + num_allocations: 1, + num_threads: 1, + model_id: "cross-encoder__ms-marco-minilm-l-6-v2", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/69582847099ee62ed34feddfaba83ef6.asciidoc b/docs/doc_examples/69582847099ee62ed34feddfaba83ef6.asciidoc new file mode 100644 index 000000000..836a958ed --- /dev/null +++ b/docs/doc_examples/69582847099ee62ed34feddfaba83ef6.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + quantity: { + histogram: { + field: "quantity", + interval: 10, + missing: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc b/docs/doc_examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc new file mode 100644 index 000000000..55aba4454 --- /dev/null +++ b/docs/doc_examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + require_field_match: false, + fields: { + body: { + pre_tags: ["<em>"], + post_tags: ["</em>"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc b/docs/doc_examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc new file mode 100644 index 000000000..07b79978d --- /dev/null +++ b/docs/doc_examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 1, + stored_fields: "tags,counter", +}); +console.log(response); +---- diff --git a/docs/doc_examples/69a7be47f85138b10437113ab2f0d72d.asciidoc b/docs/doc_examples/69a7be47f85138b10437113ab2f0d72d.asciidoc deleted file mode 100644 index c5f90d612..000000000 --- a/docs/doc_examples/69a7be47f85138b10437113ab2f0d72d.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -// This file is
autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'twitter', - id: '2', - routing: 'user1', - stored_fields: 'tags,counter' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/69ab708fe65a75f870223d2289c3d171.asciidoc b/docs/doc_examples/69ab708fe65a75f870223d2289c3d171.asciidoc new file mode 100644 index 000000000..58f053630 --- /dev/null +++ b/docs/doc_examples/69ab708fe65a75f870223d2289c3d171.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "Hide my IP", + processors: [ + { + redact: { + field: "message", + patterns: ["%{IP:REDACTED}", "%{EMAILADDRESS:REDACTED}"], + prefix: "*", + suffix: "*", + }, + }, + ], + }, + docs: [ + { + _source: { + message: "55.3.244.1 GET /index.html 15824 0.043 test@elastic.co", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc b/docs/doc_examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc new file mode 100644 index 000000000..a367d97aa --- /dev/null +++ b/docs/doc_examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/69d5710bdec73041c66f21d5f96637e8.asciidoc b/docs/doc_examples/69d5710bdec73041c66f21d5f96637e8.asciidoc new file mode 100644 index 000000000..a4e5f6f09 --- /dev/null +++ b/docs/doc_examples/69d5710bdec73041c66f21d5f96637e8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index_long", + mappings: { + properties: { + field: { + type: "date_nanos", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/69d9b8fd364596aa37eae6864d8a6d89.asciidoc b/docs/doc_examples/69d9b8fd364596aa37eae6864d8a6d89.asciidoc new file mode 100644 index 000000000..04aafb486 --- /dev/null +++ b/docs/doc_examples/69d9b8fd364596aa37eae6864d8a6d89.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ".watcher-history*", + pretty: "true", + sort: [ + { + "result.execution_time": "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/69daf5ec2a9bc07096e1833286c36076.asciidoc b/docs/doc_examples/69daf5ec2a9bc07096e1833286c36076.asciidoc new file mode 100644 index 000000000..426c6447d --- /dev/null +++ b/docs/doc_examples/69daf5ec2a9bc07096e1833286c36076.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "timeseries_template", + index_patterns: ["timeseries-*"], + template: { + settings: { + number_of_shards: 1, + number_of_replicas: 1, + "index.lifecycle.name": "timeseries_policy", + 
"index.lifecycle.rollover_alias": "timeseries", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc b/docs/doc_examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc new file mode 100644 index 000000000..a17ce6569 --- /dev/null +++ b/docs/doc_examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.migration.deprecations({ + index: "logstash-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a1702dd50690cae833572e48a0ddf25.asciidoc b/docs/doc_examples/6a1702dd50690cae833572e48a0ddf25.asciidoc index f4959e703..16ed57458 100644 --- a/docs/doc_examples/6a1702dd50690cae833572e48a0ddf25.asciidoc +++ b/docs/doc_examples/6a1702dd50690cae833572e48a0ddf25.asciidoc @@ -4,18 +4,12 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'Will Smith', - fields: [ - 'title', - '*_name' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "Will Smith", + fields: ["title", "*_name"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/6a350a17701e8c8158407191f2718b66.asciidoc b/docs/doc_examples/6a350a17701e8c8158407191f2718b66.asciidoc new file mode 100644 index 000000000..e9a2ea7cb --- /dev/null +++ b/docs/doc_examples/6a350a17701e8c8158407191f2718b66.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.unfollow({ + index: "follower_index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a3a578ce37fb2c63ccfab7f75db9bae.asciidoc b/docs/doc_examples/6a3a578ce37fb2c63ccfab7f75db9bae.asciidoc new file mode 100644 index 000000000..da66cc8b8 --- /dev/null +++ b/docs/doc_examples/6a3a578ce37fb2c63ccfab7f75db9bae.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "ingest.geoip.downloader.enabled": false, + "indices.lifecycle.history_index_enabled": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc b/docs/doc_examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc new file mode 100644 index 000000000..34eea3284 --- /dev/null +++ b/docs/doc_examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.getPipeline({ + id: "my-pipeline-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a3f06962cceb3dfd3cd4fb5c679fa75.asciidoc b/docs/doc_examples/6a3f06962cceb3dfd3cd4fb5c679fa75.asciidoc new file mode 100644 index 000000000..174de7224 --- /dev/null +++ b/docs/doc_examples/6a3f06962cceb3dfd3cd4fb5c679fa75.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "my-index-000001", + tokenizer: "keyword", + char_filter: ["my_mappings_char_filter"], + text: 
"I'm delighted about it :(", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a4679531e64c492fce16dc12de6dcb0.asciidoc b/docs/doc_examples/6a4679531e64c492fce16dc12de6dcb0.asciidoc deleted file mode 100644 index 344a431a2..000000000 --- a/docs/doc_examples/6a4679531e64c492fce16dc12de6dcb0.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - order: { - _count: 'asc' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/6a50c1c53673fe9cc3cbda38a2853cdd.asciidoc b/docs/doc_examples/6a50c1c53673fe9cc3cbda38a2853cdd.asciidoc new file mode 100644 index 000000000..fe9c85ee0 --- /dev/null +++ b/docs/doc_examples/6a50c1c53673fe9cc3cbda38a2853cdd.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.deleteAsync({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a55dbba114c6c1408474f7e9cfdbb94.asciidoc b/docs/doc_examples/6a55dbba114c6c1408474f7e9cfdbb94.asciidoc new file mode 100644 index 000000000..70459caa9 --- /dev/null +++ b/docs/doc_examples/6a55dbba114c6c1408474f7e9cfdbb94.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_unverified_backup", + verify: "false", + repository: { + type: "fs", + settings: { + location: "my_unverified_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6a81d00f0d73bc5985e76b3cadab645e.asciidoc b/docs/doc_examples/6a81d00f0d73bc5985e76b3cadab645e.asciidoc deleted file mode 100644 index 9a500641a..000000000 --- a/docs/doc_examples/6a81d00f0d73bc5985e76b3cadab645e.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - tag: { - type: 'text', - fielddata: true, - fielddata_frequency_filter: { - min: 0.001, - max: 0.1, - min_segment_size: 500 - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/6a9655fe22fa5db7a540c145bcf1fb31.asciidoc b/docs/doc_examples/6a9655fe22fa5db7a540c145bcf1fb31.asciidoc new file mode 100644 index 000000000..f5f3bbd42 --- /dev/null +++ b/docs/doc_examples/6a9655fe22fa5db7a540c145bcf1fb31.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "stats-index", + id: 1, + document: { + agg_metric: { + min: -302.5, + max: 702.3, + sum: 200, + value_count: 25, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "stats-index", + id: 2, + document: { + agg_metric: { + min: -93, + max: 1702.3, + sum: 300, + value_count: 25, + }, + }, +}); +console.log(response1); +---- diff --git 
a/docs/doc_examples/6a969ebe7490d93d35be895b14e5a42a.asciidoc b/docs/doc_examples/6a969ebe7490d93d35be895b14e5a42a.asciidoc new file mode 100644 index 000000000..fed00c64b --- /dev/null +++ b/docs/doc_examples/6a969ebe7490d93d35be895b14e5a42a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: "logs-my_app-default", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6aa2941855d13f365f70aa8767ecb137.asciidoc b/docs/doc_examples/6aa2941855d13f365f70aa8767ecb137.asciidoc new file mode 100644 index 000000000..ff7934808 --- /dev/null +++ b/docs/doc_examples/6aa2941855d13f365f70aa8767ecb137.asciidoc @@ -0,0 +1,60 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + city: { + type: "text", + fields: { + raw: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + city: "New York", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + city: "York", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + match: { + city: "york", + }, + }, + sort: { + "city.raw": "asc", + }, + aggs: { + Cities: { + terms: { + field: "city.raw", + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/6aca241c0361d26f134712821e2d09a9.asciidoc b/docs/doc_examples/6aca241c0361d26f134712821e2d09a9.asciidoc new file mode 100644 index 000000000..63445d9ea --- /dev/null +++ b/docs/doc_examples/6aca241c0361d26f134712821e2d09a9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.cleanupRepository({ + name: "my_repository", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6af9dc1c3240aa8e623ff3622bcb1b48.asciidoc b/docs/doc_examples/6af9dc1c3240aa8e623ff3622bcb1b48.asciidoc new file mode 100644 index 000000000..41332985c --- /dev/null +++ b/docs/doc_examples/6af9dc1c3240aa8e623ff3622bcb1b48.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.exclude._ip": "192.168.2.*", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b0288acb739c4667d41339e5100c327.asciidoc b/docs/doc_examples/6b0288acb739c4667d41339e5100c327.asciidoc new file mode 100644 index 000000000..bc220ac1f --- /dev/null +++ b/docs/doc_examples/6b0288acb739c4667d41339e5100c327.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + query: "this is a testt", + fuzziness: "AUTO", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b0d492c0f50103fefeab385a7bebd01.asciidoc 
b/docs/doc_examples/6b0d492c0f50103fefeab385a7bebd01.asciidoc new file mode 100644 index 000000000..494d76385 --- /dev/null +++ b/docs/doc_examples/6b0d492c0f50103fefeab385a7bebd01.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "logs-debug", + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + message: { + type: "text", + }, + level: { + type: "constant_keyword", + value: "debug", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b104a66ab47fc1e1f24a5738f82feb4.asciidoc b/docs/doc_examples/6b104a66ab47fc1e1f24a5738f82feb4.asciidoc new file mode 100644 index 000000000..6565c34d0 --- /dev/null +++ b/docs/doc_examples/6b104a66ab47fc1e1f24a5738f82feb4.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.putAutoFollowPattern({ + name: "beats", + remote_cluster: "leader", + leader_index_patterns: ["metricbeat-*", "packetbeat-*"], + follow_index_pattern: "{{leader_index}}-copy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc b/docs/doc_examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc new file mode 100644 index 000000000..dc348caaf --- /dev/null +++ b/docs/doc_examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.stop(); +console.log(response); +---- diff --git a/docs/doc_examples/6b1e837a8469eca2d03d5c36f5910f34.asciidoc b/docs/doc_examples/6b1e837a8469eca2d03d5c36f5910f34.asciidoc new file mode 100644 index 000000000..496a684a3 --- /dev/null +++ b/docs/doc_examples/6b1e837a8469eca2d03d5c36f5910f34.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + t_shirts: { + filter: { + term: { + type: "t-shirt", + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc b/docs/doc_examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc new file mode 100644 index 000000000..a892432a6 --- /dev/null +++ b/docs/doc_examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.shutdown.deleteNode({ + node_id: "USpTGYaBSIKbgSUJR2Z9lg", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc b/docs/doc_examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc new file mode 100644 index 000000000..3226a57c7 --- /dev/null +++ b/docs/doc_examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.indices.putSettings({ + index: "*", + settings: { + "index.search.slowlog.include.user": true, + "index.search.slowlog.threshold.fetch.warn": "30s", + "index.search.slowlog.threshold.query.warn": "30s", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc b/docs/doc_examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc new file mode 100644 index 000000000..ddb9c53f2 --- /dev/null +++ b/docs/doc_examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "google-vertex-ai-embeddings", + pipeline: "google_vertex_ai_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc b/docs/doc_examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc new file mode 100644 index 000000000..b7f2cb320 --- /dev/null +++ b/docs/doc_examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + sparse_vector: { + field: "ml.tokens", + query_vector: { + token1: 0.5, + token2: 0.3, + token3: 0.2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc b/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc new file mode 100644 index 000000000..30adbe22e --- /dev/null +++ b/docs/doc_examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my-example-app", + search_application: { + indices: ["example-index"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {{/query}}\n ],\n "filter": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n "_source": {\n "includes": ["title", "plot"]\n },\n "aggs": {{#toJson}}_es_aggs{{/toJson}},\n "from": {{from}},\n "size": {{size}},\n "sort": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ', + params: { + query: "", + _es_filters: {}, + _es_aggs: {}, + _es_sort_fields: {}, + size: 10, + from: 0, + }, + dictionary: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b77795e9249c8d9865f7a49fd86a863.asciidoc b/docs/doc_examples/6b77795e9249c8d9865f7a49fd86a863.asciidoc new file mode 100644 index 000000000..fa4b12cc5 --- /dev/null +++ b/docs/doc_examples/6b77795e9249c8d9865f7a49fd86a863.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + range: { + age: { + gte: 10, + lte: 20, + boost: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc b/docs/doc_examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc new file mode 100644 index 000000000..ae22a8b4d --- /dev/null +++ b/docs/doc_examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc @@ 
-0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", + querystring: { + status: "pending", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc b/docs/doc_examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc new file mode 100644 index 000000000..9250d0925 --- /dev/null +++ b/docs/doc_examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putTemplate({ + name: "template_1", + index_patterns: ["te*"], + order: 0, + settings: { + number_of_shards: 1, + }, + mappings: { + _source: { + enabled: false, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putTemplate({ + name: "template_2", + index_patterns: ["tes*"], + order: 1, + settings: { + number_of_shards: 1, + }, + mappings: { + _source: { + enabled: true, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc b/docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc new file mode 100644 index 000000000..41c42d206 --- /dev/null +++ b/docs/doc_examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test-index", + query: { + match: { + my_semantic_field: "Which country is Paris in?", + }, + }, + highlight: { + fields: { + my_semantic_field: { + number_of_fragments: 2, + order: "score", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc b/docs/doc_examples/6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc index 9d44450c3..51a32d243 100644 --- a/docs/doc_examples/6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc +++ b/docs/doc_examples/6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc @@ -4,30 +4,27 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - bool: { - should: [ - { - match: { - title: 'quick brown fox' - } + query: { + bool: { + should: [ + { + match: { + title: "quick brown fox", }, - { - match: { - 'title.original': 'quick brown fox' - } + }, + { + match: { + "title.original": "quick brown fox", }, - { - match: { - 'title.shingles': 'quick brown fox' - } - } - ] - } - } - } -}) -console.log(response) + }, + { + match: { + "title.shingles": "quick brown fox", + }, + }, + ], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/6be70810d6ebd6f09d8a49f9df847765.asciidoc b/docs/doc_examples/6be70810d6ebd6f09d8a49f9df847765.asciidoc deleted file mode 100644 index 66307ee3c..000000000 --- a/docs/doc_examples/6be70810d6ebd6f09d8a49f9df847765.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - body: { - query: { - nested: { - path: 'obj1', - query: { - bool: { - must: [ - { - match: { - 'obj1.name': 'blue' - } - }, - { - range: { - 'obj1.count': { - gt: 5 - } - } - } - ] - } - }, - score_mode: 
'avg' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/6bf63f2ec6ba55fcaf1092f48212bf25.asciidoc b/docs/doc_examples/6bf63f2ec6ba55fcaf1092f48212bf25.asciidoc deleted file mode 100644 index a252f320f..000000000 --- a/docs/doc_examples/6bf63f2ec6ba55fcaf1092f48212bf25.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - user_identifier: { - type: 'keyword' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/6bfa0a9a50c4e94276c7d63af1c31d9e.asciidoc b/docs/doc_examples/6bfa0a9a50c4e94276c7d63af1c31d9e.asciidoc new file mode 100644 index 000000000..0c0ea6b3e --- /dev/null +++ b/docs/doc_examples/6bfa0a9a50c4e94276c7d63af1c31d9e.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "place", + mappings: { + properties: { + suggest: { + type: "completion", + contexts: [ + { + name: "place_type", + type: "category", + }, + { + name: "location", + type: "geo", + precision: 4, + }, + ], + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "place_path_category", + mappings: { + properties: { + suggest: { + type: "completion", + contexts: [ + { + name: "place_type", + type: "category", + path: "cat", + }, + { + name: "location", + type: "geo", + precision: 4, + path: "loc", + }, + ], + }, + loc: { + type: "geo_point", + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/6c00dae1a456ae5e854e98e895dca2ab.asciidoc b/docs/doc_examples/6c00dae1a456ae5e854e98e895dca2ab.asciidoc new file mode 100644 index 000000000..e80bc72a3 --- /dev/null +++ b/docs/doc_examples/6c00dae1a456ae5e854e98e895dca2ab.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + query: { + match: { + message: "elasticsearch", + }, + }, + script_score: { + script: { + source: "Math.log(2 + doc['my-int'].value)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6c0acbff2df9003ccaf4350c9e2e186e.asciidoc b/docs/doc_examples/6c0acbff2df9003ccaf4350c9e2e186e.asciidoc new file mode 100644 index 000000000..73adb18c2 --- /dev/null +++ b/docs/doc_examples/6c0acbff2df9003ccaf4350c9e2e186e.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_polygon: { + "person.location": { + points: [ + [-70, 40], + [-80, 30], + [-90, 20], + ], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6c3f7c8601e8cc13d36eef98a5e2cb34.asciidoc b/docs/doc_examples/6c3f7c8601e8cc13d36eef98a5e2cb34.asciidoc new file mode 100644 index 000000000..38e73da9a --- /dev/null +++ b/docs/doc_examples/6c3f7c8601e8cc13d36eef98a5e2cb34.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "drivers", + mappings: { + properties: { + driver: { + type: "nested", + properties: { + last_name: { + type: "text", + }, + vehicle: { + type: "nested", + properties: { + make: { + type: "text", + }, + model: { + type: "text", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc b/docs/doc_examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc new file mode 100644 index 000000000..f9d6b2ee4 --- /dev/null +++ b/docs/doc_examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.recovery({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6c72460570307f23478100db04a84c8e.asciidoc b/docs/doc_examples/6c72460570307f23478100db04a84c8e.asciidoc new file mode 100644 index 000000000..b7c25f62e --- /dev/null +++ b/docs/doc_examples/6c72460570307f23478100db04a84c8e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getComponentTemplate({ + name: "temp*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6c72f6791ba9223943f7556c5bfaa728.asciidoc b/docs/doc_examples/6c72f6791ba9223943f7556c5bfaa728.asciidoc new file mode 100644 index 000000000..93e7af9bf --- /dev/null +++ b/docs/doc_examples/6c72f6791ba9223943f7556c5bfaa728.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "user.id": "kimchy", + }, + }, + fields: [ + "user.id", + "http.response.*", + { + field: "@timestamp", + format: "epoch_millis", + }, + ], + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc b/docs/doc_examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc new file mode 100644 index 000000000..624074238 --- /dev/null +++ b/docs/doc_examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.reloadSecureSettings({ + secure_settings_password: "keystore-password", +}); +console.log(response); + +const response1 = await client.nodes.reloadSecureSettings({ + node_id: "nodeId1,nodeId2", + secure_settings_password: "keystore-password", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc b/docs/doc_examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc new file mode 100644 index 000000000..49ffbd241 --- /dev/null +++ b/docs/doc_examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + username: "myuser", + realm_name: "native1", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc b/docs/doc_examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc new file mode 100644 index 000000000..fd3ab96e4 --- /dev/null +++ b/docs/doc_examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + text: { + type: "text", + fields: { + raw: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + text: [ + "the quick brown fox", + "the quick brown fox", + "jumped over the lazy dog", + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/6cd083045bf06e80b83889a939a18451.asciidoc b/docs/doc_examples/6cd083045bf06e80b83889a939a18451.asciidoc new file mode 100644 index 000000000..9fb10298a --- /dev/null +++ b/docs/doc_examples/6cd083045bf06e80b83889a939a18451.asciidoc @@ -0,0 +1,95 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user: { + type: "nested", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + group: "fans", + user: [ + { + first: "John", + last: "Smith", + }, + { + first: "Alice", + last: "White", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + nested: { + path: "user", + query: { + bool: { + must: [ + { + match: { + "user.first": "Alice", + }, + }, + { + match: { + "user.last": "Smith", + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + nested: { + path: "user", + query: { + bool: { + must: [ + { + match: { + "user.first": "Alice", + }, + }, + { + match: { + "user.last": "White", + }, + }, + ], + }, + }, + inner_hits: { + highlight: { + fields: { + "user.first": {}, + }, + }, + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc b/docs/doc_examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc new file mode 100644 index 000000000..396d11fe4 --- /dev/null +++ b/docs/doc_examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_query_rules/my-ruleset", + body: { + rules: [ + { + rule_id: "my-rule1", + type: "pinned", + criteria: [ + { + type: "contains", + metadata: "user_query", + values: ["pugs", "puggles"], + }, + { + type: "exact", + metadata: "user_country", + values: ["us"], + }, + ], + actions: { + ids: ["id1", "id2"], + }, + }, + { + rule_id: "my-rule2", + type: "exclude", + criteria: [ + { + type: "fuzzy", + metadata: "user_query", + values: ["rescue dogs"], + }, + ], + actions: { + docs: [ + { + _index: "index1", + _id: "id3", + }, + { + _index: "index2", + _id: "id4", + }, + ], + }, + }, + ], + 
}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6ce8334def48552ba7d44025580d9105.asciidoc b/docs/doc_examples/6ce8334def48552ba7d44025580d9105.asciidoc new file mode 100644 index 000000000..2333655e3 --- /dev/null +++ b/docs/doc_examples/6ce8334def48552ba7d44025580d9105.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "<my-index-{now/d}-000001>", + aliases: { + "my-alias": {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6cf3307c00f464c46475e352e067d714.asciidoc b/docs/doc_examples/6cf3307c00f464c46475e352e067d714.asciidoc new file mode 100644 index 000000000..489e7448a --- /dev/null +++ b/docs/doc_examples/6cf3307c00f464c46475e352e067d714.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_geoshapes", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: { + lat: 40.73, + lon: -74.1, + }, + bottom_right: { + lat: 40.01, + lon: -71.12, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6d48f83c4a36d0544d876d3eff48dcef.asciidoc b/docs/doc_examples/6d48f83c4a36d0544d876d3eff48dcef.asciidoc new file mode 100644 index 000000000..3dd4b3ef8 --- /dev/null +++ b/docs/doc_examples/6d48f83c4a36d0544d876d3eff48dcef.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.executeRetention(); +console.log(response); +---- diff --git a/docs/doc_examples/6d81c749ff9554044ee5f3ad92dcb89a.asciidoc b/docs/doc_examples/6d81c749ff9554044ee5f3ad92dcb89a.asciidoc new file mode 100644 index 000000000..2b2033138 --- /dev/null +++ b/docs/doc_examples/6d81c749ff9554044ee5f3ad92dcb89a.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my-weather-sensor-lifecycle-policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_age: "1d", + max_primary_shard_size: "50gb", + }, + }, + }, + warm: { + min_age: "30d", + actions: { + shrink: { + number_of_shards: 1, + }, + forcemerge: { + max_num_segments: 1, + }, + }, + }, + cold: { + min_age: "60d", + actions: { + searchable_snapshot: { + snapshot_repository: "found-snapshots", + }, + }, + }, + frozen: { + min_age: "90d", + actions: { + searchable_snapshot: { + snapshot_repository: "found-snapshots", + }, + }, + }, + delete: { + min_age: "735d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6db118771354792646229e7a3c30c7e9.asciidoc b/docs/doc_examples/6db118771354792646229e7a3c30c7e9.asciidoc new file mode 100644 index 000000000..9623daabc --- /dev/null +++ b/docs/doc_examples/6db118771354792646229e7a3c30c7e9.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + {
+ timestamp: 1516729294000, + temperature: 200, + voltage: 5.2, + node: "a", + }, + { + index: {}, + }, + { + timestamp: 1516642894000, + temperature: 201, + voltage: 5.8, + node: "b", + }, + { + index: {}, + }, + { + timestamp: 1516556494000, + temperature: 202, + voltage: 5.1, + node: "a", + }, + { + index: {}, + }, + { + timestamp: 1516470094000, + temperature: 198, + voltage: 5.6, + node: "b", + }, + { + index: {}, + }, + { + timestamp: 1516383694000, + temperature: 200, + voltage: 4.2, + node: "c", + }, + { + index: {}, + }, + { + timestamp: 1516297294000, + temperature: 202, + voltage: 4, + node: "c", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/6dbfe5565a95508e65d304131847f9fc.asciidoc b/docs/doc_examples/6dbfe5565a95508e65d304131847f9fc.asciidoc new file mode 100644 index 000000000..f308c6785 --- /dev/null +++ b/docs/doc_examples/6dbfe5565a95508e65d304131847f9fc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "edge_ngram", + min_gram: 1, + max_gram: 2, + }, + ], + text: "the quick brown fox jumps", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc b/docs/doc_examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc new file mode 100644 index 000000000..fdbb82230 --- /dev/null +++ b/docs/doc_examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\n FROM library\n | EVAL year = DATE_EXTRACT("year", release_date)\n | WHERE page_count > 300 AND author == "Frank Herbert"\n | STATS count = COUNT(*) by year\n | WHERE count > 0\n | LIMIT 5\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc b/docs/doc_examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc new file mode 100644 index 000000000..76f1aa692 --- /dev/null +++ b/docs/doc_examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateToken({ + username: "myuser", + realm_name: "saml1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc b/docs/doc_examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc new file mode 100644 index 000000000..018089d85 --- /dev/null +++ b/docs/doc_examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.delete({ + repository: "my_repository", + snapshot: "snapshot_2,snapshot_3", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc b/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc new file mode 100644 index 000000000..5bd1b226c --- /dev/null +++ b/docs/doc_examples/6ddd4e657efbf45def430a6419825796.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "completion", + inference_id: "azure_ai_studio_completion", + inference_config: { + service: "azureaistudio", + service_settings: { + api_key: "<api_key>", + target: "<target_uri>", + provider: "<model_provider>", + endpoint_type: "<endpoint_type>", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6e000496a1fa8b57148518eaad692f35.asciidoc b/docs/doc_examples/6e000496a1fa8b57148518eaad692f35.asciidoc new file mode 100644 index 000000000..c909cdf1a --- /dev/null +++ b/docs/doc_examples/6e000496a1fa8b57148518eaad692f35.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_none: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6e0b675eff7ed73c09a76a415930a486.asciidoc b/docs/doc_examples/6e0b675eff7ed73c09a76a415930a486.asciidoc new file mode 100644 index 000000000..9ddf0e410 --- /dev/null +++ b/docs/doc_examples/6e0b675eff7ed73c09a76a415930a486.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + "my-join-field": { + type: "join", + relations: { + "my-parent": "my-child", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6e1157f3184fa192d47a3d0e3ea17a6c.asciidoc b/docs/doc_examples/6e1157f3184fa192d47a3d0e3ea17a6c.asciidoc new file mode 100644 index 000000000..97695fc48 --- /dev/null +++ b/docs/doc_examples/6e1157f3184fa192d47a3d0e3ea17a6c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "letter_unique_pos_example", + settings: { + analysis: { + analyzer: { + letter_unique_pos: { + tokenizer: "letter", + filter: ["unique_pos"], + }, + }, + filter: { + unique_pos: { + type: "unique", + only_on_same_position: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6e1ae8d6103e0b77f14fb0ea1bfb7ffa.asciidoc b/docs/doc_examples/6e1ae8d6103e0b77f14fb0ea1bfb7ffa.asciidoc new file mode 100644 index 000000000..2b2e6635e --- /dev/null +++ b/docs/doc_examples/6e1ae8d6103e0b77f14fb0ea1bfb7ffa.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "GEOMETRYCOLLECTION (POINT (1000.0 100.0), LINESTRING (1001.0 100.0, 1002.0 100.0))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc b/docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc new file mode 100644 index 000000000..fdd6ab8f3 --- /dev/null +++ b/docs/doc_examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: ".ml-anomalies-custom-example", + }, + dest: { + index:
".reindexed-v9-ml-anomalies-custom-example", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc b/docs/doc_examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc new file mode 100644 index 000000000..a541500cb --- /dev/null +++ b/docs/doc_examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "movies", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + sparse_vector: { + field: "plot_embedding", + inference_id: "my-elser-model", + query: "films that explore psychological depths", + }, + }, + }, + }, + { + standard: { + query: { + multi_match: { + query: "crime", + fields: ["plot", "title"], + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [10, 22, 77], + k: 10, + num_candidates: 10, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6e86225ed4a6e3be8078b83ef301f731.asciidoc b/docs/doc_examples/6e86225ed4a6e3be8078b83ef301f731.asciidoc new file mode 100644 index 000000000..13215e509 --- /dev/null +++ b/docs/doc_examples/6e86225ed4a6e3be8078b83ef301f731.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + percolate: { + field: "query", + document: { + message: "A new bonsai tree in the office", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6ea062455229151e311869a81ee40252.asciidoc b/docs/doc_examples/6ea062455229151e311869a81ee40252.asciidoc new file mode 100644 index 000000000..b8f766fc4 --- /dev/null +++ b/docs/doc_examples/6ea062455229151e311869a81ee40252.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "_all", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "*", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/6edfc35a66afd9b884431fccf48fdbf5.asciidoc b/docs/doc_examples/6edfc35a66afd9b884431fccf48fdbf5.asciidoc new file mode 100644 index 000000000..ffbcb2402 --- /dev/null +++ b/docs/doc_examples/6edfc35a66afd9b884431fccf48fdbf5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + "lowercase", + { + type: "synonym_graph", + synonyms: ["pc => personal computer", "computer, pc, laptop"], + }, + ], + text: "Check how PC synonyms work", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6eead05dd3b04722ef0ea5644c2e047d.asciidoc b/docs/doc_examples/6eead05dd3b04722ef0ea5644c2e047d.asciidoc new file mode 100644 index 000000000..8c778d24d --- /dev/null +++ b/docs/doc_examples/6eead05dd3b04722ef0ea5644c2e047d.asciidoc @@ -0,0 +1,49 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + total_sales: { + sum: { + field: "price", + }, + }, + "t-shirts": { + filter: { + term: { + type: "t-shirt", + }, + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + "t-shirt-percentage": { + bucket_script: { + buckets_path: { + tShirtSales: "t-shirts>sales", + totalSales: "total_sales", + }, + script: "params.tShirtSales / params.totalSales * 100", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f0389ac52808df23bb6081a1acd4eed.asciidoc b/docs/doc_examples/6f0389ac52808df23bb6081a1acd4eed.asciidoc new file mode 100644 index 000000000..7ca6b0211 --- /dev/null +++ b/docs/doc_examples/6f0389ac52808df23bb6081a1acd4eed.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.enableUser({ + username: "logstash_system", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f07152055e99416deb10e95b428b847.asciidoc b/docs/doc_examples/6f07152055e99416deb10e95b428b847.asciidoc new file mode 100644 index 000000000..5257c944a --- /dev/null +++ b/docs/doc_examples/6f07152055e99416deb10e95b428b847.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "edge_ngram_custom_example", + settings: { + analysis: { + analyzer: { + default: { + tokenizer: "whitespace", + filter: ["3_5_edgegrams"], + }, + }, + filter: { + "3_5_edgegrams": { + type: "edge_ngram", + min_gram: 3, + max_gram: 5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f097c298a7abf4c032c4314920c49c8.asciidoc b/docs/doc_examples/6f097c298a7abf4c032c4314920c49c8.asciidoc deleted file mode 100644 index 670e86ca3..000000000 --- a/docs/doc_examples/6f097c298a7abf4c032c4314920c49c8.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - index: [ - 'twitter', - 'blog' - ] - }, - dest: { - index: 'all_together' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/6f21a878fee3b43c5332b81aaddbeac7.asciidoc b/docs/doc_examples/6f21a878fee3b43c5332b81aaddbeac7.asciidoc deleted file mode 100644 index 08d4db379..000000000 --- a/docs/doc_examples/6f21a878fee3b43c5332b81aaddbeac7.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'title', - 'content' - ], - query: 'this OR that OR thus', - type: 'cross_fields', - minimum_should_match: 2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/6f34e27481460a95e59ffbacb76bd637.asciidoc b/docs/doc_examples/6f34e27481460a95e59ffbacb76bd637.asciidoc new file mode 100644 index 000000000..719ffc1b4 --- /dev/null +++ 
b/docs/doc_examples/6f34e27481460a95e59ffbacb76bd637.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_analyzer: { + char_filter: ["emoticons"], + tokenizer: "punctuation", + filter: ["lowercase", "english_stop"], + }, + }, + tokenizer: { + punctuation: { + type: "pattern", + pattern: "[ .,!?]", + }, + }, + char_filter: { + emoticons: { + type: "mapping", + mappings: [":) => _happy_", ":( => _sad_"], + }, + }, + filter: { + english_stop: { + type: "stop", + stopwords: "_english_", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_custom_analyzer", + text: "I'm a :) person, and you?", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc new file mode 100644 index 000000000..47b3cfd86 --- /dev/null +++ b/docs/doc_examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkUpdateApiKeys({ + ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc b/docs/doc_examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc new file mode 100644 index 000000000..64d437f6f --- /dev/null +++ b/docs/doc_examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.translate({ + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc b/docs/doc_examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc new file mode 100644 index 000000000..f105ce8e3 --- /dev/null +++ b/docs/doc_examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-image-index", + size: 3, + query: { + bool: { + should: [ + { + match: { + title: { + query: "mountain lake", + boost: 1, + }, + }, + }, + { + knn: { + field: "image-vector", + query_vector: [-5, 9, -12], + k: 10, + boost: 2, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f5adbd55a3a2760e7fe9d32df18b1a1.asciidoc b/docs/doc_examples/6f5adbd55a3a2760e7fe9d32df18b1a1.asciidoc new file mode 100644 index 000000000..3287480a9 --- /dev/null +++ b/docs/doc_examples/6f5adbd55a3a2760e7fe9d32df18b1a1.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "logs", + document: { + timestamp: "2015-05-17T18:12:07.613Z", + request: "GET index.html", + status_code: 404, + message: "Error: File not found", + }, +}); +console.log(response); 
+---- diff --git a/docs/doc_examples/6f6d5a4a90e1265822628d4ced963639.asciidoc b/docs/doc_examples/6f6d5a4a90e1265822628d4ced963639.asciidoc new file mode 100644 index 000000000..674318811 --- /dev/null +++ b/docs/doc_examples/6f6d5a4a90e1265822628d4ced963639.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + create_date: "2015/09/02", + }, +}); +console.log(response); + +const response1 = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/6f842819c50e8490080dd085e0c6aca3.asciidoc b/docs/doc_examples/6f842819c50e8490080dd085e0c6aca3.asciidoc new file mode 100644 index 000000000..c9537fe0f --- /dev/null +++ b/docs/doc_examples/6f842819c50e8490080dd085e0c6aca3.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + size: 0, + aggs: { + foo_terms: { + terms: { + field: "foo", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc b/docs/doc_examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc new file mode 100644 index 000000000..bbf47db33 --- /dev/null +++ b/docs/doc_examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.logstash.getPipeline({ + id: "my_pipeline", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f8a682c908b826ca90cadd9d2f582b4.asciidoc b/docs/doc_examples/6f8a682c908b826ca90cadd9d2f582b4.asciidoc new file mode 100644 index 000000000..61c814366 --- /dev/null +++ b/docs/doc_examples/6f8a682c908b826ca90cadd9d2f582b4.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + stored_fields: ["user", "postDate"], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc b/docs/doc_examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc new file mode 100644 index 000000000..b1295fe7d --- /dev/null +++ b/docs/doc_examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.delete({ + connector_id: "my-connector-id", + delete_sync_jobs: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc b/docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc new file mode 100644 index 000000000..afea3d985 --- /dev/null +++ b/docs/doc_examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + multi_match: { + query: "brown f", + type: "bool_prefix",
fields: ["my_field", "my_field._2gram", "my_field._3gram"], + }, + }, + highlight: { + fields: { + my_field: { + matched_fields: ["my_field._index_prefix"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6fa570ae7039171e2ab722344ec1063f.asciidoc b/docs/doc_examples/6fa570ae7039171e2ab722344ec1063f.asciidoc new file mode 100644 index 000000000..0e259c87b --- /dev/null +++ b/docs/doc_examples/6fa570ae7039171e2ab722344ec1063f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "my-index-000001", + fields: "user", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6fbb88f399618e1b47412082062ce2bd.asciidoc b/docs/doc_examples/6fbb88f399618e1b47412082062ce2bd.asciidoc new file mode 100644 index 000000000..ca9fca0c6 --- /dev/null +++ b/docs/doc_examples/6fbb88f399618e1b47412082062ce2bd.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_logs", + }, + pivot: { + group_by: { + timestamp: { + date_histogram: { + field: "timestamp", + fixed_interval: "1h", + }, + }, + }, + aggregations: { + "bytes.max": { + max: { + field: "bytes", + }, + }, + top: { + top_metrics: { + metrics: [ + { + field: "clientip", + }, + { + field: "geo.src", + }, + ], + sort: { + bytes: "desc", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6fbbf40cab0187f544ff7bca31d18d57.asciidoc b/docs/doc_examples/6fbbf40cab0187f544ff7bca31d18d57.asciidoc new file mode 100644 index 000000000..a8101d857 --- /dev/null +++ b/docs/doc_examples/6fbbf40cab0187f544ff7bca31d18d57.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "Polygon", + coordinates: [ + [ + [100, 0], + [101, 0], + [101, 1], + [100, 1], + [100, 0], + ], + [ + [100.2, 0.2], + [100.8, 0.2], + [100.8, 0.8], + [100.2, 0.8], + [100.2, 0.2], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc b/docs/doc_examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc new file mode 100644 index 000000000..f55316e25 --- /dev/null +++ b/docs/doc_examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchableSnapshots.clearCache({ + index: "my-index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6fd82baa17a48e09e3d2eed514af7f46.asciidoc b/docs/doc_examples/6fd82baa17a48e09e3d2eed514af7f46.asciidoc new file mode 100644 index 000000000..42f8f8f12 --- /dev/null +++ b/docs/doc_examples/6fd82baa17a48e09e3d2eed514af7f46.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "MultiLineString", + coordinates: [ + [ + 
[102, 2], + [103, 2], + [103, 3], + [102, 3], + ], + [ + [100, 0], + [101, 0], + [101, 1], + [100, 1], + ], + [ + [100.2, 0.2], + [100.8, 0.2], + [100.8, 0.8], + [100.2, 0.8], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc b/docs/doc_examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc new file mode 100644 index 000000000..324d41871 --- /dev/null +++ b/docs/doc_examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteDataLifecycle({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/6febf0e6883b23b15ac213abc4bac326.asciidoc b/docs/doc_examples/6febf0e6883b23b15ac213abc4bac326.asciidoc new file mode 100644 index 000000000..b44743e74 --- /dev/null +++ b/docs/doc_examples/6febf0e6883b23b15ac213abc4bac326.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "place", + suggest: { + place_suggestion: { + prefix: "tim", + completion: { + field: "suggest", + size: 10, + contexts: { + location: { + lat: 43.662, + lon: -79.38, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7011fcdd231804f9c3894154ae2c3fbc.asciidoc b/docs/doc_examples/7011fcdd231804f9c3894154ae2c3fbc.asciidoc new file mode 100644 index 000000000..ccbea663c --- /dev/null +++ b/docs/doc_examples/7011fcdd231804f9c3894154ae2c3fbc.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + "text.tokens": { + type: "sparse_vector", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc b/docs/doc_examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc new file mode 100644 index 000000000..64ab745d0 --- /dev/null +++ b/docs/doc_examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7021ddb273a3a00847324d2f670c4c04.asciidoc b/docs/doc_examples/7021ddb273a3a00847324d2f670c4c04.asciidoc new file mode 100644 index 000000000..c1cc5cbae --- /dev/null +++ b/docs/doc_examples/7021ddb273a3a00847324d2f670c4c04.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + query: { + match: { + title: { + query: "mountain lake", + boost: 0.9, + }, + }, + }, + knn: [ + { + field: "image-vector", + query_vector: [54, 10, -2], + k: 5, + num_candidates: 50, + boost: 0.1, + }, + { + field: "title-vector", + query_vector: [1, 20, -52, 23, 10], + k: 10, + num_candidates: 10, + boost: 0.5, + }, + ], + size: 10, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/7067a498bb6c788854a26443a64b843a.asciidoc b/docs/doc_examples/7067a498bb6c788854a26443a64b843a.asciidoc new file mode 100644 index 000000000..40a3c3ca0 --- /dev/null +++ b/docs/doc_examples/7067a498bb6c788854a26443a64b843a.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + runtime_mappings: { + "amount.signed": { + type: "double", + script: + "\n double amount = doc['amount'].value;\n if (doc['type'].value == 'expense') {\n amount *= -1;\n }\n emit(amount);\n ", + }, + }, + query: { + bool: { + filter: { + range: { + "amount.signed": { + lt: 10, + }, + }, + }, + }, + }, + fields: [ + { + field: "amount.signed", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/708e7ec681be41791f232817a07cda82.asciidoc b/docs/doc_examples/708e7ec681be41791f232817a07cda82.asciidoc new file mode 100644 index 000000000..3533d112b --- /dev/null +++ b/docs/doc_examples/708e7ec681be41791f232817a07cda82.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "snapshot*", + size: 2, + sort: "name", + offset: 2, +}); +console.log(response); +---- diff --git a/docs/doc_examples/70bbe14bc4d5a5d58e81ab2b02408817.asciidoc b/docs/doc_examples/70bbe14bc4d5a5d58e81ab2b02408817.asciidoc new file mode 100644 index 000000000..cac8da9a9 --- /dev/null +++ b/docs/doc_examples/70bbe14bc4d5a5d58e81ab2b02408817.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "users", + roles: ["user"], + rules: { + field: { + dn: "cn=John Doe,ou=example,o=com", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/70c736ecb3746dbe839af0e468712805.asciidoc b/docs/doc_examples/70c736ecb3746dbe839af0e468712805.asciidoc new file mode 100644 index 000000000..7691586b6 --- /dev/null +++ b/docs/doc_examples/70c736ecb3746dbe839af0e468712805.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.deleteTransform({ + transform_id: "ecommerce_transform", +}); +console.log(response); +---- diff --git a/docs/doc_examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc b/docs/doc_examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc new file mode 100644 index 000000000..ee695268c --- /dev/null +++ b/docs/doc_examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateTrainedModelDeployment({ + model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english", + number_of_allocations: 4, +}); +console.log(response); +---- diff --git a/docs/doc_examples/70f89dd6b71ea890ad3cf47d83e43344.asciidoc b/docs/doc_examples/70f89dd6b71ea890ad3cf47d83e43344.asciidoc new file mode 100644 index 000000000..efaf4c5c5 --- /dev/null +++ b/docs/doc_examples/70f89dd6b71ea890ad3cf47d83e43344.asciidoc @@ -0,0 +1,32 @@ 
+// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + description: "My optional pipeline description", + processors: [ + { + set: { + description: "My optional processor description", + field: "my-long-field", + value: 10, + }, + }, + { + set: { + description: "Set 'my-boolean-field' to true", + field: "my-boolean-field", + value: true, + }, + }, + { + lowercase: { + field: "my-keyword-field", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc b/docs/doc_examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc new file mode 100644 index 000000000..5802bc29b --- /dev/null +++ b/docs/doc_examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "ecommerce_transform2", + source: { + index: "kibana_sample_data_ecommerce", + }, + latest: { + unique_key: ["customer_id"], + sort: "order_date", + }, + description: "Latest order for each customer", + dest: { + index: "kibana_sample_data_ecommerce_transform2", + }, + frequency: "5m", + sync: { + time: { + field: "order_date", + delay: "60s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/710c7871f20f176d51209b1574b0d61b.asciidoc b/docs/doc_examples/710c7871f20f176d51209b1574b0d61b.asciidoc deleted file mode 100644 index 677e6bdfd..000000000 --- a/docs/doc_examples/710c7871f20f176d51209b1574b0d61b.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'twitter', - id: '1', - stored_fields: 'tags,counter' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/711443504b69d0d296e717c716a223e2.asciidoc b/docs/doc_examples/711443504b69d0d296e717c716a223e2.asciidoc new file mode 100644 index 000000000..773424205 --- /dev/null +++ b/docs/doc_examples/711443504b69d0d296e717c716a223e2.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "drivers", + query: { + nested: { + path: "driver", + query: { + nested: { + path: "driver.vehicle", + query: { + bool: { + must: [ + { + match: { + "driver.vehicle.make": "Powell Motors", + }, + }, + { + match: { + "driver.vehicle.model": "Canyonero", + }, + }, + ], + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7148c8512079d378af70302e65502dd2.asciidoc b/docs/doc_examples/7148c8512079d378af70302e65502dd2.asciidoc new file mode 100644 index 000000000..e2d513793 --- /dev/null +++ b/docs/doc_examples/7148c8512079d378af70302e65502dd2.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "timeseries-000001", + aliases: { + timeseries: { + is_write_index: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7163346755400594d1dd7e445aa19ff0.asciidoc 
b/docs/doc_examples/7163346755400594d1dd7e445aa19ff0.asciidoc new file mode 100644 index 000000000..cbb6b62da --- /dev/null +++ b/docs/doc_examples/7163346755400594d1dd7e445aa19ff0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", +}); +console.log(response); +---- diff --git a/docs/doc_examples/719141517d83b7e8e929b347a8d67c9f.asciidoc b/docs/doc_examples/719141517d83b7e8e929b347a8d67c9f.asciidoc new file mode 100644 index 000000000..358afa030 --- /dev/null +++ b/docs/doc_examples/719141517d83b7e8e929b347a8d67c9f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", + features: "settings", + flat_settings: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/71998bb300ac2a58419b0772cdc1c586.asciidoc b/docs/doc_examples/71998bb300ac2a58419b0772cdc1c586.asciidoc new file mode 100644 index 000000000..2efade53d --- /dev/null +++ b/docs/doc_examples/71998bb300ac2a58419b0772cdc1c586.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + versions: { + type: "version", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + versions: ["8.0.0-beta1", "8.5.0", "0.90.12", "2.6.1", "1.3.4", "1.3.4"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/71b5b2ba9557d0f296ff2de91727d2f6.asciidoc b/docs/doc_examples/71b5b2ba9557d0f296ff2de91727d2f6.asciidoc deleted file mode 100644 index 8018c6c2c..000000000 --- a/docs/doc_examples/71b5b2ba9557d0f296ff2de91727d2f6.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - order: { - max_play_count: 'desc' - } - }, - aggs: { - max_play_count: { - max: { - field: 'play_count' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/71ba9033107882f61cdc3b32fc73568d.asciidoc b/docs/doc_examples/71ba9033107882f61cdc3b32fc73568d.asciidoc deleted file mode 100644 index b2e8f8a5c..000000000 --- a/docs/doc_examples/71ba9033107882f61cdc3b32fc73568d.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'my-index', - body: { - properties: { - 'employee-id': { - type: 'keyword', - index: false - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc b/docs/doc_examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc new file mode 100644 index 000000000..ccc77f823 --- /dev/null +++ b/docs/doc_examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc @@ -0,0 
+1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.stats({ + node_id: "node1,node*,master:false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/71de08a2d962c66f0c60677eff23f8d1.asciidoc b/docs/doc_examples/71de08a2d962c66f0c60677eff23f8d1.asciidoc new file mode 100644 index 000000000..87fdcbfec --- /dev/null +++ b/docs/doc_examples/71de08a2d962c66f0c60677eff23f8d1.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + price_ranges: { + range: { + field: "price", + keyed: true, + ranges: [ + { + key: "cheap", + to: 100, + }, + { + key: "average", + from: 100, + to: 200, + }, + { + key: "expensive", + from: 200, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/71e47a83f632ef159956287bbfe4ca12.asciidoc b/docs/doc_examples/71e47a83f632ef159956287bbfe4ca12.asciidoc new file mode 100644 index 000000000..c76b1d751 --- /dev/null +++ b/docs/doc_examples/71e47a83f632ef159956287bbfe4ca12.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example", + query: { + shape: { + geometry: { + shape: { + type: "envelope", + coordinates: [ + [1355, 5355], + [1400, 5200], + ], + }, + relation: "within", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc b/docs/doc_examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc new file mode 100644 index 000000000..559eb3d85 --- /dev/null +++ b/docs/doc_examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "analyze_sample", + analyzer: "whitespace", + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/722238b4e7b78cdb3c6a986780e7e286.asciidoc b/docs/doc_examples/722238b4e7b78cdb3c6a986780e7e286.asciidoc new file mode 100644 index 000000000..f2001fb6e --- /dev/null +++ b/docs/doc_examples/722238b4e7b78cdb3c6a986780e7e286.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "range_index", + size: 0, + query: { + range: { + time_frame: { + gte: "2019-11-01", + format: "yyyy-MM-dd", + }, + }, + }, + aggs: { + november_data: { + date_histogram: { + field: "time_frame", + calendar_interval: "day", + format: "yyyy-MM-dd", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/726994d8f3793b86628255a797155a52.asciidoc b/docs/doc_examples/726994d8f3793b86628255a797155a52.asciidoc new file mode 100644 index 000000000..c85f3c379 --- /dev/null +++ b/docs/doc_examples/726994d8f3793b86628255a797155a52.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info({ + node_id: 
"ingest", + filter_path: "nodes.*.ingest.processors", +}); +console.log(response); +---- diff --git a/docs/doc_examples/72a3668ddc95d9aec47cc679d1e7afc5.asciidoc b/docs/doc_examples/72a3668ddc95d9aec47cc679d1e7afc5.asciidoc new file mode 100644 index 000000000..3fe8f1d65 --- /dev/null +++ b/docs/doc_examples/72a3668ddc95d9aec47cc679d1e7afc5.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_one: { + seeds: ["35.238.149.1:9300"], + skip_unavailable: true, + }, + cluster_two: { + seeds: ["35.238.149.2:9300"], + skip_unavailable: false, + }, + cluster_three: { + seeds: ["35.238.149.3:9300"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc b/docs/doc_examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc new file mode 100644 index 000000000..186d8540d --- /dev/null +++ b/docs/doc_examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.start(); +console.log(response); +---- diff --git a/docs/doc_examples/72b999120785dfba2827268482e9be0a.asciidoc b/docs/doc_examples/72b999120785dfba2827268482e9be0a.asciidoc new file mode 100644 index 000000000..042a2e3b2 --- /dev/null +++ b/docs/doc_examples/72b999120785dfba2827268482e9be0a.asciidoc @@ -0,0 +1,98 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (2.336389 48.861111)", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + query: { + match: { + name: "musée", + }, + }, + aggs: { + viewport: { + geo_bounds: { + field: "location", + wrap_longitude: true, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc b/docs/doc_examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc new file mode 100644 index 000000000..d46bcd8d4 --- /dev/null +++ b/docs/doc_examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: 
"my_repository", + snapshot: "*", + sort: "name", + from_sort_value: "snapshot_2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc b/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc new file mode 100644 index 000000000..261b4ebbb --- /dev/null +++ b/docs/doc_examples/72beebe779a258c225dee7b023e60c52.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "indices", + index_metric: "search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc b/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc new file mode 100644 index 000000000..b2400e39b --- /dev/null +++ b/docs/doc_examples/730045fae3743c39b612813a42c330c3.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + prefix: { + full_name: { + value: "ki", + }, + }, + }, + highlight: { + fields: { + full_name: { + matched_fields: ["full_name._index_prefix"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/73250f845738c428246a3ade66a8f54c.asciidoc b/docs/doc_examples/73250f845738c428246a3ade66a8f54c.asciidoc new file mode 100644 index 000000000..e49b85234 --- /dev/null +++ b/docs/doc_examples/73250f845738c428246a3ade66a8f54c.asciidoc @@ -0,0 +1,51 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "exams", + refresh: "true", + document: { + grade: 100, + weight: [2, 3], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "exams", + refresh: "true", + document: { + grade: 80, + weight: 3, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "exams", + filter_path: "aggregations", + size: 0, + runtime_mappings: { + "weight.combined": { + type: "double", + script: + "\n double s = 0;\n for (double w : doc['weight']) {\n s += w;\n }\n emit(s);\n ", + }, + }, + aggs: { + weighted_grade: { + weighted_avg: { + value: { + script: "doc.grade.value + 1", + }, + weight: { + field: "weight.combined", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/734c2e2a1e45b84f1e4e65b51356fcd7.asciidoc b/docs/doc_examples/734c2e2a1e45b84f1e4e65b51356fcd7.asciidoc deleted file mode 100644 index af1817642..000000000 --- a/docs/doc_examples/734c2e2a1e45b84f1e4e65b51356fcd7.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'new_users', - body: { - mappings: { - properties: { - user_id: { - type: 'keyword' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/734e2b1d1ca84a305240a449738f0eba.asciidoc b/docs/doc_examples/734e2b1d1ca84a305240a449738f0eba.asciidoc new file mode 100644 index 000000000..0d616a95b --- /dev/null +++ b/docs/doc_examples/734e2b1d1ca84a305240a449738f0eba.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + v: "true", + index: ".ds-my-data-stream-2022.06.17-000001,kibana_sample_data_flights", + h: "index,status,health", +}); +console.log(response); +---- diff --git a/docs/doc_examples/73646c12ad33a813ab2280f1dc83500e.asciidoc b/docs/doc_examples/73646c12ad33a813ab2280f1dc83500e.asciidoc new file mode 100644 index 000000000..e5c2d87bb --- /dev/null +++ b/docs/doc_examples/73646c12ad33a813ab2280f1dc83500e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.follow({ + index: "<follower_index>", + wait_for_active_shards: 1, + remote_cluster: "<remote_cluster>", + leader_index: "<leader_index>", +}); +console.log(response); +---- diff --git a/docs/doc_examples/738db420e3ad2a127ea75fb8e5051926.asciidoc b/docs/doc_examples/738db420e3ad2a127ea75fb8e5051926.asciidoc new file mode 100644 index 000000000..a3947f0ec --- /dev/null +++ b/docs/doc_examples/738db420e3ad2a127ea75fb8e5051926.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "last-log-from-clientip", +}); +console.log(response); +---- diff --git a/docs/doc_examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc b/docs/doc_examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc new file mode 100644 index 000000000..7f0ee2b39 --- /dev/null +++ b/docs/doc_examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.getBehavioralAnalytics(); +console.log(response); +---- diff --git a/docs/doc_examples/73be1f93d789264e5b972ddb5991bc66.asciidoc b/docs/doc_examples/73be1f93d789264e5b972ddb5991bc66.asciidoc new file mode 100644 index 000000000..d5aaaf737 --- /dev/null +++ b/docs/doc_examples/73be1f93d789264e5b972ddb5991bc66.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.discovery": "DEBUG", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc b/docs/doc_examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc new file mode 100644 index 000000000..f8d2781a1 --- /dev/null +++ b/docs/doc_examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getRollupIndexCaps({ + index: "sensor_rollup", +}); +console.log(response); +---- diff --git a/docs/doc_examples/73df03be6ee78b10106581dbd7cb39ef.asciidoc b/docs/doc_examples/73df03be6ee78b10106581dbd7cb39ef.asciidoc new file mode 100644 index 000000000..7b799e7e0 --- /dev/null +++ b/docs/doc_examples/73df03be6ee78b10106581dbd7cb39ef.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { 
my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.ewma(values, 0.3)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc b/docs/doc_examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc new file mode 100644 index 000000000..b7156b497 --- /dev/null +++ b/docs/doc_examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.enable": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/73f9271dee9b8539b6aa7e17f323c623.asciidoc b/docs/doc_examples/73f9271dee9b8539b6aa7e17f323c623.asciidoc new file mode 100644 index 000000000..387c13543 --- /dev/null +++ b/docs/doc_examples/73f9271dee9b8539b6aa7e17f323c623.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + aggs: { + genres_and_products: { + multi_terms: { + terms: [ + { + field: "genre", + }, + { + field: "product", + }, + ], + order: { + total_quantity: "desc", + }, + }, + aggs: { + total_quantity: { + sum: { + field: "quantity", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc b/docs/doc_examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc new file mode 100644 index 000000000..05336a24a --- /dev/null +++ b/docs/doc_examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getServiceAccounts({ + namespace: "elastic", + service: "fleet-server", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7404c6e809fee5d7eb9678a82a872806.asciidoc b/docs/doc_examples/7404c6e809fee5d7eb9678a82a872806.asciidoc new file mode 100644 index 000000000..a9a9de6b8 --- /dev/null +++ b/docs/doc_examples/7404c6e809fee5d7eb9678a82a872806.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + aggs: { + "my-agg-name": { + terms: { + field: "my-field", + }, + aggs: { + "my-sub-agg-name": { + avg: { + field: "my-other-field", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/741180473ba526219578ad0422f4fe81.asciidoc b/docs/doc_examples/741180473ba526219578ad0422f4fe81.asciidoc new file mode 100644 index 000000000..8a5484224 --- /dev/null +++ b/docs/doc_examples/741180473ba526219578ad0422f4fe81.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_features", + body: { + features: { + document_level_security: { + enabled: true, + }, 
+ incremental_sync: { + enabled: true, + }, + sync_rules: { + advanced: { + enabled: false, + }, + basic: { + enabled: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc b/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc new file mode 100644 index 000000000..e41a7bf42 --- /dev/null +++ b/docs/doc_examples/7429b16221fe741fd31b0584786dd0b0.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.textEmbedding({ + inference_id: "my-cohere-endpoint", + input: + "The sky above the port was the color of television tuned to a dead channel.", + task_settings: { + input_type: "ingest", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc b/docs/doc_examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc new file mode 100644 index 000000000..c3d68dfcc --- /dev/null +++ b/docs/doc_examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "mv", + mappings: { + properties: { + b: { + type: "long", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "mv", + refresh: "true", + operations: [ + { + index: {}, + }, + { + a: 1, + b: [2, 2, 1], + }, + { + index: {}, + }, + { + a: 2, + b: [1, 1], + }, + ], +}); +console.log(response1); + +const response2 = await client.esql.query({ + query: "FROM mv | LIMIT 2", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/7456ef459d510d66ba4492cc9fbdc6c6.asciidoc b/docs/doc_examples/7456ef459d510d66ba4492cc9fbdc6c6.asciidoc new file mode 100644 index 000000000..c7336a48f --- /dev/null +++ b/docs/doc_examples/7456ef459d510d66ba4492cc9fbdc6c6.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_two: { + mode: null, + seeds: null, + skip_unavailable: null, + "transport.compress": null, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/745864ef2427188241a4702b94ea57be.asciidoc b/docs/doc_examples/745864ef2427188241a4702b94ea57be.asciidoc new file mode 100644 index 000000000..a33976eea --- /dev/null +++ b/docs/doc_examples/745864ef2427188241a4702b94ea57be.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + query: { + constant_score: { + filter: { + range: { + price: { + lte: "500", + }, + }, + }, + }, + }, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + extended_bounds: { + min: 0, + max: 500, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/745f9b8cdb8e91073f6e520e1d9f8c05.asciidoc b/docs/doc_examples/745f9b8cdb8e91073f6e520e1d9f8c05.asciidoc deleted file mode 100644 index 35ee07f1c..000000000 --- a/docs/doc_examples/745f9b8cdb8e91073f6e520e1d9f8c05.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is 
autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'twitter', - id: '0', - _source: '*.id,retweeted' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/74678f8bbc7e4fc1885719d1cf63ac67.asciidoc b/docs/doc_examples/74678f8bbc7e4fc1885719d1cf63ac67.asciidoc new file mode 100644 index 000000000..2c9c1ee30 --- /dev/null +++ b/docs/doc_examples/74678f8bbc7e4fc1885719d1cf63ac67.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + range: { + date_range: { + field: "date", + format: "MM-yyy", + ranges: [ + { + from: "01-2015", + to: "03-2015", + key: "quarter_01", + }, + { + from: "03-2015", + to: "06-2015", + key: "quarter_02", + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/746e0a1cb5984f2672963b363505c7b3.asciidoc b/docs/doc_examples/746e0a1cb5984f2672963b363505c7b3.asciidoc new file mode 100644 index 000000000..4a55b9ef8 --- /dev/null +++ b/docs/doc_examples/746e0a1cb5984f2672963b363505c7b3.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + date: { + type: "date", + format: "strict_date_optional_time||epoch_second", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "example", + refresh: "true", + document: { + date: 1618321898, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + fields: [ + { + field: "date", + }, + ], + _source: false, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/746e87db7e1e8b5e6b40d8b5b188de42.asciidoc b/docs/doc_examples/746e87db7e1e8b5e6b40d8b5b188de42.asciidoc new file mode 100644 index 000000000..9dce10243 --- /dev/null +++ b/docs/doc_examples/746e87db7e1e8b5e6b40d8b5b188de42.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grades_stats: { + stats: { + field: "grade", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc b/docs/doc_examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc new file mode 100644 index 000000000..f48892684 --- /dev/null +++ b/docs/doc_examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + match: { + query: "hot porridge", + max_gaps: 10, + filter: { + not_containing: { + match: { + query: "salty", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7477671958734843dd67cf0b8e6c7515.asciidoc b/docs/doc_examples/7477671958734843dd67cf0b8e6c7515.asciidoc deleted file mode 100644 index 42a9afd21..000000000 --- 
a/docs/doc_examples/7477671958734843dd67cf0b8e6c7515.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'index_long', - body: { - mappings: { - properties: { - field: { - type: 'date_nanos' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc b/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc new file mode 100644 index 000000000..047487632 --- /dev/null +++ b/docs/doc_examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + }, + aggs: { + daily_revenue: { + sum: { + field: "taxful_total_price", + }, + }, + smoothed_revenue: { + moving_fn: { + buckets_path: "daily_revenue", + window: 3, + script: "MovingFunctions.unweightedAvg(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/747a4b5001423938d7d05399d28f1995.asciidoc b/docs/doc_examples/747a4b5001423938d7d05399d28f1995.asciidoc new file mode 100644 index 000000000..9b0378abb --- /dev/null +++ b/docs/doc_examples/747a4b5001423938d7d05399d28f1995.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "indices.lifecycle.poll_interval": "1m", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc b/docs/doc_examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc new file mode 100644 index 000000000..335953144 --- /dev/null +++ b/docs/doc_examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.exists({ + index: "my-index-000001", + id: 0, +}); +console.log(response); +---- diff --git a/docs/doc_examples/74b13ceb6cda3acaa9e9f58c9e5e2431.asciidoc b/docs/doc_examples/74b13ceb6cda3acaa9e9f58c9e5e2431.asciidoc new file mode 100644 index 000000000..ef9d24928 --- /dev/null +++ b/docs/doc_examples/74b13ceb6cda3acaa9e9f58c9e5e2431.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + _meta: { + class: "MyApp2::User3", + version: { + min: "1.3", + max: "1.5", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/74da377bccad43da2b0e276c086d26ba.asciidoc b/docs/doc_examples/74da377bccad43da2b0e276c086d26ba.asciidoc new file mode 100644 index 000000000..6e3a95345 --- /dev/null +++ b/docs/doc_examples/74da377bccad43da2b0e276c086d26ba.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.info({ + target: 
"_all", +}); +console.log(response); + +const response1 = await client.cluster.info({ + target: "http", +}); +console.log(response1); + +const response2 = await client.cluster.info({ + target: "ingest", +}); +console.log(response2); + +const response3 = await client.cluster.info({ + target: "thread_pool", +}); +console.log(response3); + +const response4 = await client.cluster.info({ + target: "script", +}); +console.log(response4); + +const response5 = await client.cluster.info({ + target: "http,ingest", +}); +console.log(response5); +---- diff --git a/docs/doc_examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc b/docs/doc_examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc new file mode 100644 index 000000000..8a82d0135 --- /dev/null +++ b/docs/doc_examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.reroute({ + commands: [ + { + allocate_empty_primary: { + index: "my-index", + shard: 0, + node: "my-node", + accept_data_loss: "true", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/75330ec1305d2beb0e2f34d2195464e2.asciidoc b/docs/doc_examples/75330ec1305d2beb0e2f34d2195464e2.asciidoc deleted file mode 100644 index 3db659111..000000000 --- a/docs/doc_examples/75330ec1305d2beb0e2f34d2195464e2.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match_all: { - boost: 1.2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc b/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc new file mode 100644 index 000000000..3c4dca864 --- /dev/null +++ b/docs/doc_examples/7594a9a85c8511701e281974cbc253e1.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "amazon_bedrock_embeddings", + inference_config: { + service: "amazonbedrock", + service_settings: { + access_key: "", + secret_key: "", + region: "", + provider: "", + model: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc b/docs/doc_examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc new file mode 100644 index 000000000..0ec4dd0eb --- /dev/null +++ b/docs/doc_examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.closeJob({ + job_id: "low_request_rate", +}); +console.log(response); +---- diff --git a/docs/doc_examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc b/docs/doc_examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc new file mode 100644 index 000000000..d60a21fe9 --- /dev/null +++ b/docs/doc_examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + 
explain: "true", + query: { + query_string: { + query: "@timestamp:foo", + lenient: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/75c347b181112d2c4538c01ade903afe.asciidoc b/docs/doc_examples/75c347b181112d2c4538c01ade903afe.asciidoc new file mode 100644 index 000000000..2fbf4fba5 --- /dev/null +++ b/docs/doc_examples/75c347b181112d2c4538c01ade903afe.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + rewrite: "true", + query: { + match: { + "user.id": { + query: "kimchy", + fuzziness: "auto", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/75e13a00f0909c955031ff62acc14a79.asciidoc b/docs/doc_examples/75e13a00f0909c955031ff62acc14a79.asciidoc new file mode 100644 index 000000000..2f6f4225f --- /dev/null +++ b/docs/doc_examples/75e13a00f0909c955031ff62acc14a79.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "GET /search", + }, + }, + collapse: { + field: "user.id", + }, + sort: [ + { + "http.response.bytes": { + order: "desc", + }, + }, + ], + from: 0, +}); +console.log(response); +---- diff --git a/docs/doc_examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc b/docs/doc_examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc new file mode 100644 index 000000000..5b9aad331 --- /dev/null +++ b/docs/doc_examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "transactions", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + type: "sale", + amount: 80, + }, + { + index: { + _id: 2, + }, + }, + { + type: "cost", + amount: 10, + }, + { + index: { + _id: 3, + }, + }, + { + type: "cost", + amount: 30, + }, + { + index: { + _id: 4, + }, + }, + { + type: "sale", + amount: 130, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc b/docs/doc_examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc new file mode 100644 index 000000000..fe5240085 --- /dev/null +++ b/docs/doc_examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.rollupSearch({ + index: "sensor_rollup", + size: 0, + aggregations: { + avg_temperature: { + avg: { + field: "temperature", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/763ce1377c8dfa1ca6a042d8ee99f4f5.asciidoc b/docs/doc_examples/763ce1377c8dfa1ca6a042d8ee99f4f5.asciidoc new file mode 100644 index 000000000..0b3e61686 --- /dev/null +++ b/docs/doc_examples/763ce1377c8dfa1ca6a042d8ee99f4f5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "k9s", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/76448aaaaa2c352bb6e09d2f83a3fbb3.asciidoc b/docs/doc_examples/76448aaaaa2c352bb6e09d2f83a3fbb3.asciidoc new file mode 100644 index 000000000..de1d5548b --- /dev/null +++ b/docs/doc_examples/76448aaaaa2c352bb6e09d2f83a3fbb3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "letter", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/764f9884b370cbdc82a1c5c42ed40ff3.asciidoc b/docs/doc_examples/764f9884b370cbdc82a1c5c42ed40ff3.asciidoc deleted file mode 100644 index 6944419e8..000000000 --- a/docs/doc_examples/764f9884b370cbdc82a1c5c42ed40ff3.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - index: 'twitter', - query: { - term: { - user: 'kimchy' - } - } - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7659f2f2b0fbe8584b855a01638b95ed.asciidoc b/docs/doc_examples/7659f2f2b0fbe8584b855a01638b95ed.asciidoc new file mode 100644 index 000000000..b0ad2e9ce --- /dev/null +++ b/docs/doc_examples/7659f2f2b0fbe8584b855a01638b95ed.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + user_name: { + terms: { + field: "user_name", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/765c9c8b40b67a42121648045dbf10fb.asciidoc b/docs/doc_examples/765c9c8b40b67a42121648045dbf10fb.asciidoc new file mode 100644 index 000000000..4a4cc035f --- /dev/null +++ b/docs/doc_examples/765c9c8b40b67a42121648045dbf10fb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + filter_path: "nodes.*.jvm.mem.pools.old", +}); +console.log(response); +---- diff --git a/docs/doc_examples/766cfc1c9fcd2c186e965761ceb2c07d.asciidoc b/docs/doc_examples/766cfc1c9fcd2c186e965761ceb2c07d.asciidoc new file mode 100644 index 000000000..a5adb3692 --- /dev/null +++ b/docs/doc_examples/766cfc1c9fcd2c186e965761ceb2c07d.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + number_of_replicas: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/769f75829a8e6670aa4cf83d0d737046.asciidoc b/docs/doc_examples/769f75829a8e6670aa4cf83d0d737046.asciidoc new file mode 100644 index 000000000..4434f1c64 --- /dev/null +++ b/docs/doc_examples/769f75829a8e6670aa4cf83d0d737046.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + 
document: { + date: "2015-10-01T00:30:00Z", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + date: "2015-10-01T01:30:00Z", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 3, + refresh: "true", + document: { + date: "2015-10-01T02:30:00Z", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + by_day: { + auto_date_histogram: { + field: "date", + buckets: 3, + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/76b279835936ee4b546a171c671c3cd7.asciidoc b/docs/doc_examples/76b279835936ee4b546a171c671c3cd7.asciidoc new file mode 100644 index 000000000..f957fef1c --- /dev/null +++ b/docs/doc_examples/76b279835936ee4b546a171c671c3cd7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["cjk_width"], + text: "シーサイドライナー", +}); +console.log(response); +---- diff --git a/docs/doc_examples/76bc87c2592864152768687c2963d1d1.asciidoc b/docs/doc_examples/76bc87c2592864152768687c2963d1d1.asciidoc new file mode 100644 index 000000000..0fb78415d --- /dev/null +++ b/docs/doc_examples/76bc87c2592864152768687c2963d1d1.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.updateApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", + role_descriptors: { + "role-a": { + indices: [ + { + names: ["*"], + privileges: ["write"], + }, + ], + }, + }, + metadata: { + environment: { + level: 2, + trusted: true, + tags: ["production"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/76c167d8ab305cb43b594f140c902dfe.asciidoc b/docs/doc_examples/76c167d8ab305cb43b594f140c902dfe.asciidoc new file mode 100644 index 000000000..91593ace6 --- /dev/null +++ b/docs/doc_examples/76c167d8ab305cb43b594f140c902dfe.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shrink({ + index: "my_source_index", + target: "my_target_index", + settings: { + "index.number_of_replicas": 1, + "index.number_of_shards": 1, + "index.codec": "best_compression", + }, + aliases: { + my_search_indices: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc b/docs/doc_examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc new file mode 100644 index 000000000..b40a8f8d9 --- /dev/null +++ b/docs/doc_examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + operations: [ + { + index: { + _index: "amazon-reviews", + _id: "2", + }, + }, + { + review_text: "This product is amazing! I love it.", + review_vector: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], + }, + { + index: { + _index: "amazon-reviews", + _id: "3", + }, + }, + { + review_text: "This product is terrible. 
I hate it.", + review_vector: [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1], + }, + { + index: { + _index: "amazon-reviews", + _id: "4", + }, + }, + { + review_text: "This product is great. I can do anything with it.", + review_vector: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], + }, + { + index: { + _index: "amazon-reviews", + _id: "5", + }, + }, + { + review_text: + "This product has ruined my life and the lives of my family and friends.", + review_vector: [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc b/docs/doc_examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc new file mode 100644 index 000000000..8d065bde7 --- /dev/null +++ b/docs/doc_examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "standard", + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/76e02434835630cb830724beb92df354.asciidoc b/docs/doc_examples/76e02434835630cb830724beb92df354.asciidoc new file mode 100644 index 000000000..ab4d1fc80 --- /dev/null +++ b/docs/doc_examples/76e02434835630cb830724beb92df354.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + { + text_similarity_reranker: { + retriever: { + standard: { + query: { + term: { + topic: "ai", + }, + }, + }, + }, + field: "text", + inference_id: "my-rerank-model", + inference_text: + "Can I use generative AI to identify user intent and improve search relevance?", + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc b/docs/doc_examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc new file mode 100644 index 000000000..9bd1c1eea --- /dev/null +++ b/docs/doc_examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + match: { + description: { + query: "fluffy pancakes", + operator: "and", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7709a48020a6cefbbe547fb944541cdb.asciidoc b/docs/doc_examples/7709a48020a6cefbbe547fb944541cdb.asciidoc new file mode 100644 index 000000000..d657a9e99 --- /dev/null +++ b/docs/doc_examples/7709a48020a6cefbbe547fb944541cdb.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-bit-vectors", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + my_vector: [127, -127, 0, 1, 42], + }, + { + index: { + _id: "2", + }, + }, + { + my_vector: "8100012a7f", + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/77243bbf92f2a55e0fca6c2a349a1c15.asciidoc b/docs/doc_examples/77243bbf92f2a55e0fca6c2a349a1c15.asciidoc deleted file mode 100644 index 1a12ff2cc..000000000 --- a/docs/doc_examples/77243bbf92f2a55e0fca6c2a349a1c15.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': [ - [ - -70, - 40 - ], - [ - -71, - 42 - ] - ], - order: 'asc', - unit: 'km' - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7741a04e7e621c528cd72848d875776d.asciidoc b/docs/doc_examples/7741a04e7e621c528cd72848d875776d.asciidoc new file mode 100644 index 000000000..d09d2747f --- /dev/null +++ b/docs/doc_examples/7741a04e7e621c528cd72848d875776d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/77447e2966708e92f5e219d43ac3f00d.asciidoc b/docs/doc_examples/77447e2966708e92f5e219d43ac3f00d.asciidoc new file mode 100644 index 000000000..74f0318e5 --- /dev/null +++ b/docs/doc_examples/77447e2966708e92f5e219d43ac3f00d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + actions: "*reindex", + wait_for_completion: "true", + timeout: "10s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc b/docs/doc_examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc new file mode 100644 index 000000000..6334c10b3 --- /dev/null +++ b/docs/doc_examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.msearchTemplate({ + index: "my-index", + search_templates: [ + {}, + { + id: "my-search-template", + params: { + query_string: "hello world", + from: 0, + size: 10, + }, + }, + {}, + { + id: "my-other-search-template", + params: { + query_type: "match_all", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/774d715155cd13713e6e327adf6ce328.asciidoc b/docs/doc_examples/774d715155cd13713e6e327adf6ce328.asciidoc deleted file mode 100644 index 2b21e7c28..000000000 --- a/docs/doc_examples/774d715155cd13713e6e327adf6ce328.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - tags: { - terms: { - field: 'tags', - execution_hint: 'map' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc new file mode 100644 index 000000000..4a8d900b8 --- /dev/null +++ b/docs/doc_examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` 
to generate the docs examples + +[source, js] +---- +const response = await client.textStructure.findMessageStructure({ + messages: [ + "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", + "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", + "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", + "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", + "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled", + "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", + "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", + "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", + "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...", + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc b/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc new file mode 100644 index 000000000..f6a5082a7 --- /dev/null +++ b/docs/doc_examples/7752b677825523bfb0c38ad9325a6d47.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.delete({ + connector_id: "another-connector", + delete_sync_jobs: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc b/docs/doc_examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc new file mode 100644 index 000000000..4cb267864 --- /dev/null +++ b/docs/doc_examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.ml.inferTrainedModel({ + model_id: "model2", + docs: [ + { + text_field: "The movie was awesome!!", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7777326c6052fee28061e5b82540aedc.asciidoc b/docs/doc_examples/7777326c6052fee28061e5b82540aedc.asciidoc new file mode 100644 index 000000000..52190b90d --- /dev/null +++ b/docs/doc_examples/7777326c6052fee28061e5b82540aedc.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + grade_percentiles: { + percentiles: { + field: "grade", + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc b/docs/doc_examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc new file mode 100644 index 000000000..730b79191 --- /dev/null +++ b/docs/doc_examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.putBehavioralAnalytics({ + name: "my_analytics_collection", +}); +console.log(response); +---- diff --git a/docs/doc_examples/77828fcaecc3f058c48b955928198ff6.asciidoc b/docs/doc_examples/77828fcaecc3f058c48b955928198ff6.asciidoc new file mode 100644 index 000000000..6e2fce72a --- /dev/null +++ b/docs/doc_examples/77828fcaecc3f058c48b955928198ff6.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "parse multiple patterns", + processors: [ + { + grok: { + field: "message", + patterns: ["%{FAVORITE_DOG:pet}", "%{FAVORITE_CAT:pet}"], + pattern_definitions: { + FAVORITE_DOG: "beagle", + FAVORITE_CAT: "burmese", + }, + }, + }, + ], + }, + docs: [ + { + _source: { + message: "I love burmese cats!", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc b/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc new file mode 100644 index 000000000..08570d5c6 --- /dev/null +++ b/docs/doc_examples/77b90f6787195767b6da60d8532714b4.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "azure_openai_embeddings", + inference_config: { + service: "azureopenai", + service_settings: { + api_key: "", + resource_name: "", + deployment_id: "", + api_version: "2024-02-01", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc b/docs/doc_examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc new file mode 100644 index 000000000..921f2ca66 --- /dev/null +++ b/docs/doc_examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.hotThreads(); +console.log(response); + +const response1 = await client.nodes.hotThreads({ + node_id: "nodeId1,nodeId2", +}); 
+console.log(response1); +---- diff --git a/docs/doc_examples/77c50f982906718ecc59aa708aed728f.asciidoc b/docs/doc_examples/77c50f982906718ecc59aa708aed728f.asciidoc new file mode 100644 index 000000000..30b9ef4e8 --- /dev/null +++ b/docs/doc_examples/77c50f982906718ecc59aa708aed728f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: { + source: "ctx._source.counter += params.count", + lang: "painless", + params: { + count: 4, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc b/docs/doc_examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc new file mode 100644 index 000000000..1a9c3ccc7 --- /dev/null +++ b/docs/doc_examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getModelSnapshotUpgradeStats({ + job_id: "low_request_rate", + snapshot_id: "_all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/77cebba946fe648873a1e7375c13df41.asciidoc b/docs/doc_examples/77cebba946fe648873a1e7375c13df41.asciidoc new file mode 100644 index 000000000..a09e089bb --- /dev/null +++ b/docs/doc_examples/77cebba946fe648873a1e7375c13df41.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.disk.watermark.low": "90%", + "cluster.routing.allocation.disk.watermark.high": "95%", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc b/docs/doc_examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc new file mode 100644 index 000000000..ce23498c5 --- /dev/null +++ b/docs/doc_examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc @@ -0,0 +1,68 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:17-05:00", + message: + '40.135.0.0 - - [30/Apr/2020:14:30:17 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:53-05:00", + message: + '232.0.0.0 - - [30/Apr/2020:14:30:53 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:12-05:00", + message: + '26.1.0.0 - - [30/Apr/2020:14:31:12 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:19-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:19 -0500] "GET /french/splash_inet.html HTTP/1.0" 200 3781', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:22-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] "GET /images/hm_nbg.jpg HTTP/1.0" 304 0', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:27-05:00", + message: + '252.0.0.0 - - [30/Apr/2020:14:31:27 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: 
"2020-04-30T14:31:28-05:00", + message: "not a valid apache log", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc b/docs/doc_examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc new file mode 100644 index 000000000..12eb887d4 --- /dev/null +++ b/docs/doc_examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clone({ + index: "my-index-000001", + target: "cloned-my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/78043831fd32004a82930c8ac8a1d809.asciidoc b/docs/doc_examples/78043831fd32004a82930c8ac8a1d809.asciidoc new file mode 100644 index 000000000..5151bb769 --- /dev/null +++ b/docs/doc_examples/78043831fd32004a82930c8ac8a1d809.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + text_similarity_reranker: { + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: + "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + field: "text", + inference_id: "my-rerank-model", + inference_text: + "What are the state of the art applications of AI in information retrieval?", + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc b/docs/doc_examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc new file mode 100644 index 000000000..c20499d5f --- /dev/null +++ b/docs/doc_examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.aliases({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/783c4fa5351a242364210fc32496beb2.asciidoc b/docs/doc_examples/783c4fa5351a242364210fc32496beb2.asciidoc new file mode 100644 index 000000000..932715a9c --- /dev/null +++ b/docs/doc_examples/783c4fa5351a242364210fc32496beb2.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "products", + id: 1567, + if_seq_no: 362, + if_primary_term: 2, + document: { + product: "r2d2", + details: "A resourceful astromech droid", + tags: ["droid"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7841b65a3bb880ed66cec453925a50cf.asciidoc b/docs/doc_examples/7841b65a3bb880ed66cec453925a50cf.asciidoc new file mode 100644 index 000000000..672d5b18e --- /dev/null +++ b/docs/doc_examples/7841b65a3bb880ed66cec453925a50cf.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001,my-index-000002", + query: { + match_all: {}, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/7846974b47a3eab1832a475663d23ad9.asciidoc b/docs/doc_examples/7846974b47a3eab1832a475663d23ad9.asciidoc new file mode 100644 index 000000000..c9eeee3af --- /dev/null +++ b/docs/doc_examples/7846974b47a3eab1832a475663d23ad9.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 10000, + query: { + match: { + "user.id": "elkbee", + }, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + keep_alive: "1m", + }, + sort: [ + { + "@timestamp": { + order: "asc", + format: "strict_date_optional_time_nanos", + }, + }, + ], + search_after: ["2021-05-20T05:30:04.832Z", 4294967298], + track_total_hits: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc b/docs/doc_examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc new file mode 100644 index 000000000..08547635f --- /dev/null +++ b/docs/doc_examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "jwt8_users", + refresh: "true", + roles: ["user"], + rules: { + all: [ + { + field: { + "realm.name": "jwt8", + }, + }, + { + field: { + username: "principalname1", + }, + }, + ], + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7888c509774a2abfe82ca370c43d8789.asciidoc b/docs/doc_examples/7888c509774a2abfe82ca370c43d8789.asciidoc new file mode 100644 index 000000000..d4e0c4db6 --- /dev/null +++ b/docs/doc_examples/7888c509774a2abfe82ca370c43d8789.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "cohere-embeddings", + pipeline: "cohere_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc b/docs/doc_examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc new file mode 100644 index 000000000..2838ef1fb --- /dev/null +++ b/docs/doc_examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats(); +console.log(response); +---- diff --git a/docs/doc_examples/78c96113ae4ed0054e581b17542528a7.asciidoc b/docs/doc_examples/78c96113ae4ed0054e581b17542528a7.asciidoc index 1257d122a..8cb7ac484 100644 --- a/docs/doc_examples/78c96113ae4ed0054e581b17542528a7.asciidoc +++ b/docs/doc_examples/78c96113ae4ed0054e581b17542528a7.asciidoc @@ -4,21 +4,18 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: 'source', - query: { - match: { - company: 'cat' - } - } + source: { + index: "source", + query: { + match: { + company: "cat", + }, }, - dest: { - index: 'dest', - routing: '=cat' - } - } -}) -console.log(response) + }, + dest: { + 
index: "dest", + routing: "=cat", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc b/docs/doc_examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc new file mode 100644 index 000000000..3c6cc46c4 --- /dev/null +++ b/docs/doc_examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + remove: { + index: "index1", + alias: "logs-non-existing", + }, + }, + { + add: { + index: "index2", + alias: "logs-non-existing", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc b/docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc new file mode 100644 index 000000000..d5144150f --- /dev/null +++ b/docs/doc_examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + all_of: { + ordered: false, + max_gaps: 1, + intervals: [ + { + match: { + query: "my favorite food", + max_gaps: 0, + ordered: true, + }, + }, + { + match: { + query: "cold porridge", + max_gaps: 4, + ordered: true, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/790c49fe2ec638e5e8db51a9236bba35.asciidoc b/docs/doc_examples/790c49fe2ec638e5e8db51a9236bba35.asciidoc new file mode 100644 index 000000000..47e4a08cd --- /dev/null +++ b/docs/doc_examples/790c49fe2ec638e5e8db51a9236bba35.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations,my_geoshapes", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: { + lat: 40.73, + lon: -74.1, + }, + bottom_right: { + lat: 40.01, + lon: -71.12, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7965d4dbafdc7ca9e1ee6759939dd2e8.asciidoc b/docs/doc_examples/7965d4dbafdc7ca9e1ee6759939dd2e8.asciidoc new file mode 100644 index 000000000..639870304 --- /dev/null +++ b/docs/doc_examples/7965d4dbafdc7ca9e1ee6759939dd2e8.asciidoc @@ -0,0 +1,80 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_errors", + metadata: { + color: "red", + }, + trigger: { + schedule: { + interval: "5m", + }, + }, + input: { + search: { + request: { + indices: "log-events", + body: { + size: 0, + query: { + match: { + status: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 5, + }, + }, + }, + transform: { + search: { + request: { + indices: "log-events", + body: { + query: { + match: { + status: "error", + }, + }, + }, + }, + }, + }, + actions: { + my_webhook: { + webhook: { + method: "POST", + host: "mylisteninghost", + port: 9200, + path: "/{{watch_id}}", + body: "Encountered {{ctx.payload.hits.total}} errors", + }, + }, + email_administrator: { + email: { + to: "sys.admino@host.domain", + subject: 
"Encountered {{ctx.payload.hits.total}} errors", + body: "Too many error in the system, see attached data", + attachments: { + attached_data: { + data: { + format: "json", + }, + }, + }, + priority: "high", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/79b43a1bf02fb5b38f54b8d5aa5dab53.asciidoc b/docs/doc_examples/79b43a1bf02fb5b38f54b8d5aa5dab53.asciidoc new file mode 100644 index 000000000..db825c446 --- /dev/null +++ b/docs/doc_examples/79b43a1bf02fb5b38f54b8d5aa5dab53.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + auto_date_histogram: { + field: "date", + buckets: 5, + format: "yyyy-MM-dd", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/79bf91ace935d095d8e44b3ef3fe2efa.asciidoc b/docs/doc_examples/79bf91ace935d095d8e44b3ef3fe2efa.asciidoc new file mode 100644 index 000000000..91e3d5e2d --- /dev/null +++ b/docs/doc_examples/79bf91ace935d095d8e44b3ef3fe2efa.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + flat_settings: "true", + include_defaults: "true", +}); +console.log(response); + +const response1 = await client.cluster.getSettings({ + flat_settings: "true", + include_defaults: "true", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/79cb85efd5e4c435e73b253cb9feabb1.asciidoc b/docs/doc_examples/79cb85efd5e4c435e73b253cb9feabb1.asciidoc new file mode 100644 index 000000000..359f35a94 --- /dev/null +++ b/docs/doc_examples/79cb85efd5e4c435e73b253cb9feabb1.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + runtime_mappings: { + "http.response": { + type: "long", + script: + '\n String response=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{response} %{size}\').extract(doc["message"].value)?.response;\n if (response != null) emit(Integer.parseInt(response));\n ', + }, + }, + query: { + match: { + "http.response": "304", + }, + }, + fields: ["http.response"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/79d206a528be704050a437adce2496dd.asciidoc b/docs/doc_examples/79d206a528be704050a437adce2496dd.asciidoc new file mode 100644 index 000000000..60583c320 --- /dev/null +++ b/docs/doc_examples/79d206a528be704050a437adce2496dd.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "my-elastic-rerank", + inference_config: { + service: "elasticsearch", + service_settings: { + model_id: ".rerank-v1", + num_threads: 1, + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 1, + max_number_of_allocations: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/79e053326a3a8eec828523a035393f66.asciidoc b/docs/doc_examples/79e053326a3a8eec828523a035393f66.asciidoc new file mode 100644 index 
000000000..8795c99d4 --- /dev/null +++ b/docs/doc_examples/79e053326a3a8eec828523a035393f66.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.delete({ + index: ".ds-my-data-stream-2099.03.08-000003", + id: "bfspvnIBr7VVZlfp2lqX", +}); +console.log(response); +---- diff --git a/docs/doc_examples/79e8bbbd6c440a21b0b4260c8cb1a61c.asciidoc b/docs/doc_examples/79e8bbbd6c440a21b0b4260c8cb1a61c.asciidoc new file mode 100644 index 000000000..65de39045 --- /dev/null +++ b/docs/doc_examples/79e8bbbd6c440a21b0b4260c8cb1a61c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "LINESTRING (-77.03653 38.897676, -77.009051 38.889939)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc b/docs/doc_examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc new file mode 100644 index 000000000..ad764d327 --- /dev/null +++ b/docs/doc_examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.getAutoFollowPattern({ + name: "my_auto_follow_pattern", +}); +console.log(response); +---- diff --git a/docs/doc_examples/79feb4a0c0a21b7015a52f9736cd4683.asciidoc b/docs/doc_examples/79feb4a0c0a21b7015a52f9736cd4683.asciidoc new file mode 100644 index 000000000..8f6ef79f5 --- /dev/null +++ b/docs/doc_examples/79feb4a0c0a21b7015a52f9736cd4683.asciidoc @@ -0,0 +1,69 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + comments: { + type: "nested", + properties: { + votes: { + type: "nested", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + title: "Test title", + comments: [ + { + author: "kimchy", + text: "comment text", + votes: [], + }, + { + author: "nik9000", + text: "words words words", + votes: [ + { + value: 1, + voter: "kimchy", + }, + { + value: -1, + voter: "other", + }, + ], + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + query: { + nested: { + path: "comments.votes", + query: { + match: { + "comments.votes.voter": "kimchy", + }, + }, + inner_hits: {}, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc b/docs/doc_examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc new file mode 100644 index 000000000..13de04136 --- /dev/null +++ b/docs/doc_examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-metrics", + index_patterns: ["metrics-mymetrics-*"], + priority: 200, + data_stream: {}, + template: { + settings: { + "index.mode": "time_series", + }, + mappings: { + properties: { + 
attributes: { + type: "passthrough", + priority: 10, + time_series_dimension: true, + properties: { + "host.name": { + type: "keyword", + }, + }, + }, + cpu: { + type: "integer", + time_series_metric: "counter", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics-mymetrics-test", + document: { + "@timestamp": "2020-01-01T00:00:00.000Z", + attributes: { + "host.name": "foo", + zone: "bar", + }, + cpu: 10, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7a0c633a67244e9703344d036e584d95.asciidoc b/docs/doc_examples/7a0c633a67244e9703344d036e584d95.asciidoc new file mode 100644 index 000000000..e117ad484 --- /dev/null +++ b/docs/doc_examples/7a0c633a67244e9703344d036e584d95.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.enableUserProfile({ + uid: "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc b/docs/doc_examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc new file mode 100644 index 000000000..2da76ddb9 --- /dev/null +++ b/docs/doc_examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc @@ -0,0 +1,54 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "ip_location", + refresh: "true", + document: { + ip: "192.168.1.1", + country: "Canada", + city: "Montreal", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "logs", + id: 1, + refresh: "true", + document: { + host: "192.168.1.1", + message: "the first message", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "logs", + id: 2, + refresh: "true", + document: { + host: "192.168.1.2", + message: "the second message", + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "logs", + runtime_mappings: { + location: { + type: "lookup", + target_index: "ip_location", + input_field: "host", + target_field: "ip", + fetch_fields: ["country", "city"], + }, + }, + fields: ["host", "message", "location"], + _source: false, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/7a23a385a63c87cab58fd494870450fd.asciidoc b/docs/doc_examples/7a23a385a63c87cab58fd494870450fd.asciidoc new file mode 100644 index 000000000..3b2ace07c --- /dev/null +++ b/docs/doc_examples/7a23a385a63c87cab58fd494870450fd.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping4", + roles: ["superuser"], + enabled: true, + rules: { + any: [ + { + field: { + username: "esadmin", + }, + }, + { + field: { + groups: "cn=admins,dc=example,dc=com", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a27336a61284d079f3cc3994cf927d1.asciidoc b/docs/doc_examples/7a27336a61284d079f3cc3994cf927d1.asciidoc new file mode 100644 index 000000000..a289078cb --- /dev/null +++ b/docs/doc_examples/7a27336a61284d079f3cc3994cf927d1.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, 
js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key", + role_descriptors: { + "role-source1": { + indices: [ + { + names: ["source1"], + privileges: ["read"], + query: { + template: { + params: { + access_control: [ + "example.user@example.com", + "source1-user-group", + ], + }, + }, + source: "...", + }, + }, + ], + }, + "role-source2": { + indices: [ + { + names: ["source2"], + privileges: ["read"], + query: { + template: { + params: { + access_control: [ + "example.user@example.com", + "source2-user-group", + ], + }, + }, + source: "...", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a2b9a7b2b6553a48bd4db60a939c0fc.asciidoc b/docs/doc_examples/7a2b9a7b2b6553a48bd4db60a939c0fc.asciidoc new file mode 100644 index 000000000..d368a4e1d --- /dev/null +++ b/docs/doc_examples/7a2b9a7b2b6553a48bd4db60a939c0fc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test_index", + id: 1, + refresh: "true", + document: { + query: { + match: { + body: { + query: "miss bicycl", + analyzer: "whitespace", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc b/docs/doc_examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc new file mode 100644 index 000000000..63fb1f69a --- /dev/null +++ b/docs/doc_examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000003", + mappings: { + properties: { + inference_field: { + type: "semantic_text", + inference_id: "my-elser-endpoint-for-ingest", + search_inference_id: "my-elser-endpoint-for-search", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc b/docs/doc_examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc new file mode 100644 index 000000000..7470631a7 --- /dev/null +++ b/docs/doc_examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.delete({ + id: "FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a8de5606f283f4ef171b015eef6befa.asciidoc b/docs/doc_examples/7a8de5606f283f4ef171b015eef6befa.asciidoc new file mode 100644 index 000000000..bd6e331e5 --- /dev/null +++ b/docs/doc_examples/7a8de5606f283f4ef171b015eef6befa.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + metric: "search", + groups: "group1,group2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7a987cd13383bdc990155d7bd5fb221e.asciidoc b/docs/doc_examples/7a987cd13383bdc990155d7bd5fb221e.asciidoc new file mode 100644 index 000000000..7c80d271b --- /dev/null +++ b/docs/doc_examples/7a987cd13383bdc990155d7bd5fb221e.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` 
to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role5", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["*"], + except: ["customer.handle"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ab968a61bb0783f563dd6d29b253901.asciidoc b/docs/doc_examples/7ab968a61bb0783f563dd6d29b253901.asciidoc new file mode 100644 index 000000000..fbb338d57 --- /dev/null +++ b/docs/doc_examples/7ab968a61bb0783f563dd6d29b253901.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "catalan_example", + settings: { + analysis: { + filter: { + catalan_elision: { + type: "elision", + articles: ["d", "l", "m", "n", "s", "t"], + articles_case: true, + }, + catalan_stop: { + type: "stop", + stopwords: "_catalan_", + }, + catalan_keywords: { + type: "keyword_marker", + keywords: ["example"], + }, + catalan_stemmer: { + type: "stemmer", + language: "catalan", + }, + }, + analyzer: { + rebuilt_catalan: { + tokenizer: "standard", + filter: [ + "catalan_elision", + "lowercase", + "catalan_stop", + "catalan_keywords", + "catalan_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc b/docs/doc_examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc new file mode 100644 index 000000000..613941fde --- /dev/null +++ b/docs/doc_examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + conflicts: "proceed", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc b/docs/doc_examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc new file mode 100644 index 000000000..838cdbbbd --- /dev/null +++ b/docs/doc_examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "music-connector", + role_descriptors: { + "music-connector-role": { + cluster: ["monitor", "manage_connector"], + indices: [ + { + names: ["music", ".search-acl-filter-music", ".elastic-connectors*"], + privileges: ["all"], + allow_restricted_indices: false, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7b3e913368e96eaa6e22e0d03c81310e.asciidoc b/docs/doc_examples/7b3e913368e96eaa6e22e0d03c81310e.asciidoc new file mode 100644 index 000000000..098cad713 --- /dev/null +++ b/docs/doc_examples/7b3e913368e96eaa6e22e0d03c81310e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.store.type": "hybridfs", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7b3f255d28ce5b46d111402b96b41351.asciidoc b/docs/doc_examples/7b3f255d28ce5b46d111402b96b41351.asciidoc new file mode 100644 index 000000000..e563de4f6 --- /dev/null +++ 
b/docs/doc_examples/7b3f255d28ce5b46d111402b96b41351.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "admin_user", + refresh: "true", + password: "l0ng-r4nd0m-p@ssw0rd", + roles: ["my_admin_role"], + full_name: "Eirian Zola", + metadata: { + intelligence: 7, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc b/docs/doc_examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc new file mode 100644 index 000000000..04acf4a95 --- /dev/null +++ b/docs/doc_examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + doc: { + name: "new_name", + }, + doc_as_upsert: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7b7a828c21c856a3cbc41fd2f85108bf.asciidoc b/docs/doc_examples/7b7a828c21c856a3cbc41fd2f85108bf.asciidoc new file mode 100644 index 000000000..ad9af9cfe --- /dev/null +++ b/docs/doc_examples/7b7a828c21c856a3cbc41fd2f85108bf.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh(); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + size: 0, + filter_path: "hits.total", + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc b/docs/doc_examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc new file mode 100644 index 000000000..04eba86cc --- /dev/null +++ b/docs/doc_examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + name: "my-api-key", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7b908b1189f076942de8cd497ff1fa59.asciidoc b/docs/doc_examples/7b908b1189f076942de8cd497ff1fa59.asciidoc index 2107c116d..6c4197dee 100644 --- a/docs/doc_examples/7b908b1189f076942de8cd497ff1fa59.asciidoc +++ b/docs/doc_examples/7b908b1189f076942de8cd497ff1fa59.asciidoc @@ -4,20 +4,13 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'quick brown fox', - type: 'most_fields', - fields: [ - 'title', - 'title.original', - 'title.shingles' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "quick brown fox", + type: "most_fields", + fields: ["title", "title.original", "title.shingles"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7b9dfe5857bde1bd8483ea3241656714.asciidoc b/docs/doc_examples/7b9dfe5857bde1bd8483ea3241656714.asciidoc new file mode 100644 index 000000000..6fca4f796 --- /dev/null +++ b/docs/doc_examples/7b9dfe5857bde1bd8483ea3241656714.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + 
tokenizer: "whitespace", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc b/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc new file mode 100644 index 000000000..422e88d26 --- /dev/null +++ b/docs/doc_examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deleteIpLocationDatabase({ + id: "my-database-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7bdc283b96c7a965fae23013647b8578.asciidoc b/docs/doc_examples/7bdc283b96c7a965fae23013647b8578.asciidoc new file mode 100644 index 000000000..7f416d2de --- /dev/null +++ b/docs/doc_examples/7bdc283b96c7a965fae23013647b8578.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test-index", + mappings: { + properties: { + source_field: { + type: "text", + copy_to: "infer_field", + }, + infer_field: { + type: "semantic_text", + inference_id: ".elser-2-elasticsearch", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7c24d4bef3f2045407fbf1b95c5416f9.asciidoc b/docs/doc_examples/7c24d4bef3f2045407fbf1b95c5416f9.asciidoc new file mode 100644 index 000000000..127d9e0d9 --- /dev/null +++ b/docs/doc_examples/7c24d4bef3f2045407fbf1b95c5416f9.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "range_index", + settings: { + number_of_shards: 2, + }, + mappings: { + properties: { + expected_attendees: { + type: "integer_range", + }, + time_frame: { + type: "date_range", + format: "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "range_index", + id: 1, + refresh: "true", + document: { + expected_attendees: { + gte: 10, + lt: 20, + }, + time_frame: { + gte: "2015-10-31 12:00:00", + lte: "2015-11-01", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7c3414279d47e9c29105d061ed316ef8.asciidoc b/docs/doc_examples/7c3414279d47e9c29105d061ed316ef8.asciidoc new file mode 100644 index 000000000..0c0bc6bf3 --- /dev/null +++ b/docs/doc_examples/7c3414279d47e9c29105d061ed316ef8.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "music", + id: 1, + refresh: "true", + document: { + suggest: ["Nevermind", "Nirvana"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7c4551abbb7a5f3841109f7664bc4aad.asciidoc b/docs/doc_examples/7c4551abbb7a5f3841109f7664bc4aad.asciidoc new file mode 100644 index 000000000..36a0b7806 --- /dev/null +++ b/docs/doc_examples/7c4551abbb7a5f3841109f7664bc4aad.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + 
analysis: { + analyzer: { + camel: { + type: "pattern", + pattern: + "([^\\p{L}\\d]+)|(?<=\\D)(?=\\d)|(?<=\\d)(?=\\D)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "camel", + text: "MooseX::FTPClass2_beta", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7c5aed55a2a1dce4b63c18e1ce8146ff.asciidoc b/docs/doc_examples/7c5aed55a2a1dce4b63c18e1ce8146ff.asciidoc new file mode 100644 index 000000000..ab7abc2bb --- /dev/null +++ b/docs/doc_examples/7c5aed55a2a1dce4b63c18e1ce8146ff.asciidoc @@ -0,0 +1,109 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "network-traffic", + mappings: { + properties: { + ipv4: { + type: "ip", + }, + ipv6: { + type: "ip", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "network-traffic", + refresh: "true", + operations: [ + { + index: { + _id: 0, + }, + }, + { + ipv4: "192.168.1.10", + ipv6: "2001:db8:a4f8:112a:6001:0:12:7f10", + }, + { + index: { + _id: 1, + }, + }, + { + ipv4: "192.168.1.12", + ipv6: "2001:db8:a4f8:112a:6001:0:12:7f12", + }, + { + index: { + _id: 2, + }, + }, + { + ipv4: "192.168.1.33", + ipv6: "2001:db8:a4f8:112a:6001:0:12:7f33", + }, + { + index: { + _id: 3, + }, + }, + { + ipv4: "192.168.1.10", + ipv6: "2001:db8:a4f8:112a:6001:0:12:7f10", + }, + { + index: { + _id: 4, + }, + }, + { + ipv4: "192.168.2.41", + ipv6: "2001:db8:a4f8:112c:6001:0:12:7f41", + }, + { + index: { + _id: 5, + }, + }, + { + ipv4: "192.168.2.10", + ipv6: "2001:db8:a4f8:112c:6001:0:12:7f10", + }, + { + index: { + _id: 6, + }, + }, + { + ipv4: "192.168.2.23", + ipv6: "2001:db8:a4f8:112c:6001:0:12:7f23", + }, + { + index: { + _id: 7, + }, + }, + { + ipv4: "192.168.3.201", + ipv6: "2001:db8:a4f8:114f:6001:0:12:7201", + }, + { + index: { + _id: 8, + }, + }, + { + ipv4: "192.168.3.107", + ipv6: "2001:db8:a4f8:114f:6001:0:12:7307", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7c5e41a7c0075d87b8f8348a6efa990c.asciidoc b/docs/doc_examples/7c5e41a7c0075d87b8f8348a6efa990c.asciidoc new file mode 100644 index 000000000..650854bef --- /dev/null +++ b/docs/doc_examples/7c5e41a7c0075d87b8f8348a6efa990c.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseFollow({ + index: "follower_index", +}); +console.log(response); + +const response1 = await client.indices.close({ + index: "follower_index", +}); +console.log(response1); + +const response2 = await client.ccr.follow({ + index: "follower_index", + wait_for_active_shards: 1, + remote_cluster: "remote_cluster", + leader_index: "leader_index", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/7c9076f3e93a8f61189783c736bf6082.asciidoc b/docs/doc_examples/7c9076f3e93a8f61189783c736bf6082.asciidoc new file mode 100644 index 000000000..47e925855 --- /dev/null +++ b/docs/doc_examples/7c9076f3e93a8f61189783c736bf6082.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role2", 
+ indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["event_*"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ca224d1a7de20a15c008e1b9dbda377.asciidoc b/docs/doc_examples/7ca224d1a7de20a15c008e1b9dbda377.asciidoc new file mode 100644 index 000000000..dd36cf357 --- /dev/null +++ b/docs/doc_examples/7ca224d1a7de20a15c008e1b9dbda377.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + terms: { + field: "tags", + missing: "N/A", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7cac05cb589f1614fd5b8589153bef06.asciidoc b/docs/doc_examples/7cac05cb589f1614fd5b8589153bef06.asciidoc deleted file mode 100644 index 143a43ce2..000000000 --- a/docs/doc_examples/7cac05cb589f1614fd5b8589153bef06.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - doc: { - name: 'new_name' - }, - doc_as_upsert: true - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc b/docs/doc_examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc new file mode 100644 index 000000000..0719c3219 --- /dev/null +++ b/docs/doc_examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info({ + node_id: "_all", + metric: "jvm", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc b/docs/doc_examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc new file mode 100644 index 000000000..837ad5aaf --- /dev/null +++ b/docs/doc_examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + refresh_interval: null, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7cf71671859be7c1ecf673396db377cd.asciidoc b/docs/doc_examples/7cf71671859be7c1ecf673396db377cd.asciidoc deleted file mode 100644 index 1b52d5faa..000000000 --- a/docs/doc_examples/7cf71671859be7c1ecf673396db377cd.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test1', - alias: 'alias2', - filter: { - term: { - user: 'kimchy' - } - } - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7d1cbcb545aa19260073dbb2b7ef5074.asciidoc b/docs/doc_examples/7d1cbcb545aa19260073dbb2b7ef5074.asciidoc new file mode 100644 index 000000000..bd1a07cfe --- /dev/null +++ b/docs/doc_examples/7d1cbcb545aa19260073dbb2b7ef5074.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- 
+const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + size: 2, + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + }, + }, + }, + { + product: { + terms: { + field: "product", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc b/docs/doc_examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc new file mode 100644 index 000000000..b319e1c28 --- /dev/null +++ b/docs/doc_examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + flattened: { + type: "flattened", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + flattened: { + field: [ + { + id: 1, + name: "foo", + }, + { + id: 2, + name: "bar", + }, + { + id: 3, + name: "baz", + }, + ], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7d880157a95f64ad339225d4af71c2de.asciidoc b/docs/doc_examples/7d880157a95f64ad339225d4af71c2de.asciidoc new file mode 100644 index 000000000..35848c635 --- /dev/null +++ b/docs/doc_examples/7d880157a95f64ad339225d4af71c2de.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.suggestUserProfiles({ + name: "jack", + hint: { + uids: [ + "u_8RKO7AKfEbSiIHZkZZ2LJy2MUSDPWDr3tMI_CkIGApU_0", + "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", + ], + labels: { + direction: ["north", "east"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7d9eba51a269571ae62fb8b442b373ce.asciidoc b/docs/doc_examples/7d9eba51a269571ae62fb8b442b373ce.asciidoc new file mode 100644 index 000000000..687099666 --- /dev/null +++ b/docs/doc_examples/7d9eba51a269571ae62fb8b442b373ce.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["lowercase", "custom_stems", "porter_stem"], + }, + }, + filter: { + custom_stems: { + type: "stemmer_override", + rules_path: "analysis/stemmer_override.txt", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc b/docs/doc_examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc new file mode 100644 index 000000000..ae1c0059d --- /dev/null +++ b/docs/doc_examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.resetJob({ + job_id: "total-requests", + wait_for_completion: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc b/docs/doc_examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc new file mode 100644 index 
000000000..32a9da209 --- /dev/null +++ b/docs/doc_examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + sparse_vector: { + field: "ml.tokens", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + prune: true, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: false, + }, + }, + }, + rescore: { + window_size: 100, + query: { + rescore_query: { + sparse_vector: { + field: "ml.tokens", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + prune: true, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: true, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7db09cab02d71f3a10d91071216d80fc.asciidoc b/docs/doc_examples/7db09cab02d71f3a10d91071216d80fc.asciidoc new file mode 100644 index 000000000..94a971289 --- /dev/null +++ b/docs/doc_examples/7db09cab02d71f3a10d91071216d80fc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "amazon-reviews", + retriever: { + knn: { + field: "review_vector", + query_vector: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], + k: 2, + num_candidates: 5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7db798942cf2d334456e30ef5fcb801b.asciidoc b/docs/doc_examples/7db798942cf2d334456e30ef5fcb801b.asciidoc new file mode 100644 index 000000000..79ead4d92 --- /dev/null +++ b/docs/doc_examples/7db798942cf2d334456e30ef5fcb801b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + match: { + description: { + query: "fluffy pancakes", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dc6c0a6386289ac6a34105e839ced55.asciidoc b/docs/doc_examples/7dc6c0a6386289ac6a34105e839ced55.asciidoc new file mode 100644 index 000000000..55bc7f4c7 --- /dev/null +++ b/docs/doc_examples/7dc6c0a6386289ac6a34105e839ced55.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + by_date: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + my_rate: { + rate: { + unit: "year", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dc82f7d36686fd57a47e34cbda39a4e.asciidoc b/docs/doc_examples/7dc82f7d36686fd57a47e34cbda39a4e.asciidoc new file mode 100644 index 000000000..d5c2807fa --- /dev/null +++ b/docs/doc_examples/7dc82f7d36686fd57a47e34cbda39a4e.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["delimited_payload"], + text: "the|0 brown|10 fox|5 is|0 quick|10", +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc b/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc new file mode 100644 index 000000000..733c366ba --- /dev/null +++ b/docs/doc_examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + format: "yyyy-MM-dd", + }, + aggs: { + revenue: { + sum: { + field: "taxful_total_price", + }, + }, + unique_customers: { + cardinality: { + field: "customer_id", + }, + }, + avg_basket_size: { + avg: { + field: "total_quantity", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dd481337e40f16185f3baa3fc2cce15.asciidoc b/docs/doc_examples/7dd481337e40f16185f3baa3fc2cce15.asciidoc new file mode 100644 index 000000000..4733b1f3d --- /dev/null +++ b/docs/doc_examples/7dd481337e40f16185f3baa3fc2cce15.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + terms: { + _routing: ["user1"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc b/docs/doc_examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc new file mode 100644 index 000000000..9d1e63bbd --- /dev/null +++ b/docs/doc_examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_s3_repository", + repository: { + type: "s3", + settings: { + bucket: "my-bucket", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7dedb148ff74912de81b8f8275f0d7f3.asciidoc b/docs/doc_examples/7dedb148ff74912de81b8f8275f0d7f3.asciidoc new file mode 100644 index 000000000..a5cf11623 --- /dev/null +++ b/docs/doc_examples/7dedb148ff74912de81b8f8275f0d7f3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + aggs: { + price_ranges: { + terms: { + field: "price_range", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc b/docs/doc_examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc new file mode 100644 index 000000000..fd2f71849 --- /dev/null +++ b/docs/doc_examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + detailed: "true", + actions: "*byquery", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc b/docs/doc_examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc new file mode 100644 index 000000000..62d725868 --- /dev/null +++ b/docs/doc_examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc @@ 
-0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards(); +console.log(response); +---- diff --git a/docs/doc_examples/7e16d21cba51eb8960835b63a1a7266a.asciidoc b/docs/doc_examples/7e16d21cba51eb8960835b63a1a7266a.asciidoc new file mode 100644 index 000000000..de928127b --- /dev/null +++ b/docs/doc_examples/7e16d21cba51eb8960835b63a1a7266a.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_date_formats: ["MM/dd/yyyy"], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + create_date: "09/25/2015", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7e20b6e15e409b02a5e452ceddf1e1e0.asciidoc b/docs/doc_examples/7e20b6e15e409b02a5e452ceddf1e1e0.asciidoc new file mode 100644 index 000000000..4862dab30 --- /dev/null +++ b/docs/doc_examples/7e20b6e15e409b02a5e452ceddf1e1e0.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + order: "desc", + }, + }, + }, + { + product: { + terms: { + field: "product", + order: "asc", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e2b9bf4ab353c377b761101775edf93.asciidoc b/docs/doc_examples/7e2b9bf4ab353c377b761101775edf93.asciidoc new file mode 100644 index 000000000..a42e937f1 --- /dev/null +++ b/docs/doc_examples/7e2b9bf4ab353c377b761101775edf93.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "metrics-weather_sensors-dev", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:21:15.000Z", + sensor_id: "HAL-000001", + location: "plains", + temperature: 26.7, + humidity: 49.9, + }, + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:25:42.000Z", + sensor_id: "SYKENET-000001", + location: "swamp", + temperature: 32.4, + humidity: 88.9, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics-weather_sensors-dev", + document: { + "@timestamp": "2099-05-06T16:21:15.000Z", + sensor_id: "SYKENET-000001", + location: "swamp", + temperature: 32.4, + humidity: 88.9, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc b/docs/doc_examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc new file mode 100644 index 000000000..198997e4a --- /dev/null +++ b/docs/doc_examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/7e48648ca27024831c60b455e836c496.asciidoc b/docs/doc_examples/7e48648ca27024831c60b455e836c496.asciidoc new file mode 100644 index 000000000..8968a05ff --- /dev/null +++ b/docs/doc_examples/7e48648ca27024831c60b455e836c496.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + pinned: { + docs: [ + { + _index: "my-index-000001", + _id: "1", + }, + { + _id: "4", + }, + ], + organic: { + match: { + description: "iphone", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e49705769c42895fb7b1e2ca028ff47.asciidoc b/docs/doc_examples/7e49705769c42895fb7b1e2ca028ff47.asciidoc new file mode 100644 index 000000000..72f26c0a7 --- /dev/null +++ b/docs/doc_examples/7e49705769c42895fb7b1e2ca028ff47.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes(); +console.log(response); +---- diff --git a/docs/doc_examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc b/docs/doc_examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc new file mode 100644 index 000000000..fa38b85db --- /dev/null +++ b/docs/doc_examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.clearTrainedModelDeploymentCache({ + model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e5faa551f2c95ffd627da352563d450.asciidoc b/docs/doc_examples/7e5faa551f2c95ffd627da352563d450.asciidoc new file mode 100644 index 000000000..27447be85 --- /dev/null +++ b/docs/doc_examples/7e5faa551f2c95ffd627da352563d450.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping6", + roles: ["example-user"], + enabled: true, + rules: { + field: { + dn: "*,ou=subtree,dc=example,dc=com", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc b/docs/doc_examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc new file mode 100644 index 000000000..629e908bd --- /dev/null +++ b/docs/doc_examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + term: { + text: "rrf", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [3], + k: 5, + num_candidates: 5, + }, + }, + ], + rank_window_size: 5, + rank_constant: 1, + }, + }, + size: 3, + aggs: { + int_count: { + terms: { + field: "integer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc b/docs/doc_examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc new file mode 100644 index 000000000..5fda2a862 --- /dev/null +++ 
b/docs/doc_examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_query_rules/my-ruleset", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc b/docs/doc_examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc new file mode 100644 index 000000000..0750d7989 --- /dev/null +++ b/docs/doc_examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-knn-index", + mappings: { + properties: { + "my-vector": { + type: "dense_vector", + dims: 3, + index: true, + similarity: "l2_norm", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-knn-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "my-vector": [1, 5, -20], + }, + { + index: { + _id: "2", + }, + }, + { + "my-vector": [42, 8, -15], + }, + { + index: { + _id: "3", + }, + }, + { + "my-vector": [15, 11, 23], + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7ebfb30b3ece855c1b783d9210939469.asciidoc b/docs/doc_examples/7ebfb30b3ece855c1b783d9210939469.asciidoc new file mode 100644 index 000000000..7207b9cc1 --- /dev/null +++ b/docs/doc_examples/7ebfb30b3ece855c1b783d9210939469.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.flushJob({ + job_id: "total-requests", + advance_time: 1514804400000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ed26b34ce90192a1563dcddf0e45dc0.asciidoc b/docs/doc_examples/7ed26b34ce90192a1563dcddf0e45dc0.asciidoc new file mode 100644 index 000000000..d61869f16 --- /dev/null +++ b/docs/doc_examples/7ed26b34ce90192a1563dcddf0e45dc0.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + sales_deriv: { + derivative: { + buckets_path: "sales", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc b/docs/doc_examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc new file mode 100644 index 000000000..9d18e53eb --- /dev/null +++ b/docs/doc_examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "ip_location", + description: "Add ip geolocation info", + processors: [ + { + ip_location: { + field: "ip", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "ip_location", + document: { + ip: "80.231.5.0", + }, +}); +console.log(response1); + 
+const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/7f28f8ae8fcdbd807dadde0b5b007a6d.asciidoc b/docs/doc_examples/7f28f8ae8fcdbd807dadde0b5b007a6d.asciidoc deleted file mode 100644 index 874123199..000000000 --- a/docs/doc_examples/7f28f8ae8fcdbd807dadde0b5b007a6d.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - actors: { - terms: { - field: 'actors', - size: 10 - }, - aggs: { - costars: { - terms: { - field: 'actors', - size: 5 - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7f2d511cb64743c006225e5933a14bb4.asciidoc b/docs/doc_examples/7f2d511cb64743c006225e5933a14bb4.asciidoc new file mode 100644 index 000000000..c492ea70d --- /dev/null +++ b/docs/doc_examples/7f2d511cb64743c006225e5933a14bb4.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote1", + cluster: ["cross_cluster_search"], + indices: [ + { + names: [""], + privileges: ["read"], + }, + ], + remote_indices: [ + { + names: ["logs-*"], + privileges: ["read", "read_cross_cluster"], + clusters: ["my_remote_cluster"], + }, + ], + remote_cluster: [ + { + privileges: ["monitor_enrich"], + clusters: ["my_remote_cluster"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc b/docs/doc_examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc new file mode 100644 index 000000000..09db83420 --- /dev/null +++ b/docs/doc_examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", + action_modes: { + action1: "force_simulate", + action2: "skip", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7f465b7e8ed42df6c42251b4481e699e.asciidoc b/docs/doc_examples/7f465b7e8ed42df6c42251b4481e699e.asciidoc deleted file mode 100644 index 36b2e4f68..000000000 --- a/docs/doc_examples/7f465b7e8ed42df6c42251b4481e699e.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - date: { - type: 'date', - format: 'yyyy-MM-dd' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7f514e9e785e4323d16396359cb184f2.asciidoc b/docs/doc_examples/7f514e9e785e4323d16396359cb184f2.asciidoc new file mode 100644 index 000000000..9303413af --- /dev/null +++ b/docs/doc_examples/7f514e9e785e4323d16396359cb184f2.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "range_index", + properties: { + ip_allowlist: { + type: "ip_range", + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: 
"range_index", + id: 2, + document: { + ip_allowlist: "192.168.0.0/16", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc b/docs/doc_examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc index 1941d7597..c452de4e5 100644 --- a/docs/doc_examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc +++ b/docs/doc_examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc @@ -4,17 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'ny city', - auto_generate_synonyms_phrase_query: false - } - } - } - } -}) -console.log(response) + query: { + match: { + message: { + query: "ny city", + auto_generate_synonyms_phrase_query: false, + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/7f697eb436dfa3c30dfe610d8c32d132.asciidoc b/docs/doc_examples/7f697eb436dfa3c30dfe610d8c32d132.asciidoc deleted file mode 100644 index 83143ed69..000000000 --- a/docs/doc_examples/7f697eb436dfa3c30dfe610d8c32d132.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - remote: { - host: 'http://otherhost:9200/', - socket_timeout: '1m', - connect_timeout: '10s' - }, - index: 'source', - query: { - match: { - test: 'data' - } - } - }, - dest: { - index: 'dest' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/7f92ddd4e940a37d6227c43fd279c8f5.asciidoc b/docs/doc_examples/7f92ddd4e940a37d6227c43fd279c8f5.asciidoc new file mode 100644 index 000000000..61ec2bb09 --- /dev/null +++ b/docs/doc_examples/7f92ddd4e940a37d6227c43fd279c8f5.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 1, + query: { + match: { + client_ip: "211.11.9.0", + }, + }, + fields: ["*"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc b/docs/doc_examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc new file mode 100644 index 000000000..3abdbb52f --- /dev/null +++ b/docs/doc_examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getScriptContext(); +console.log(response); +---- diff --git a/docs/doc_examples/7fbebf0fc9b4a402917a4723ad547c6a.asciidoc b/docs/doc_examples/7fbebf0fc9b4a402917a4723ad547c6a.asciidoc new file mode 100644 index 000000000..cd01ba6fc --- /dev/null +++ b/docs/doc_examples/7fbebf0fc9b4a402917a4723ad547c6a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my-repo", + repository: { + type: "s3", + settings: { + bucket: "repo-bucket", + client: "elastic-internal-71bcd3", + base_path: "myrepo", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fd2532f4e12e3efbc58af195060b31e.asciidoc b/docs/doc_examples/7fd2532f4e12e3efbc58af195060b31e.asciidoc new file mode 100644 index 000000000..8306c83a4 --- /dev/null +++ 
b/docs/doc_examples/7fd2532f4e12e3efbc58af195060b31e.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + text: "The fooBarBaz method", + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + query: { + match: { + text: "bar", + }, + }, + highlight: { + fields: { + text: {}, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/7fd5883564d183603e60b37d286ac7e2.asciidoc b/docs/doc_examples/7fd5883564d183603e60b37d286ac7e2.asciidoc new file mode 100644 index 000000000..6fa8a7c88 --- /dev/null +++ b/docs/doc_examples/7fd5883564d183603e60b37d286ac7e2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteExpiredData({ + timeout: "1h", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc b/docs/doc_examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc new file mode 100644 index 000000000..9fdb8dc45 --- /dev/null +++ b/docs/doc_examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\n FROM library\n | EVAL year = DATE_EXTRACT("year", release_date)\n | WHERE page_count > ?page_count AND author == ?author\n | STATS count = COUNT(*) by year\n | WHERE count > ?count\n | LIMIT 5\n ', + params: [ + { + page_count: 300, + }, + { + author: "Frank Herbert", + }, + { + count: 0, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fe2179705304af5e87eb382dca6235a.asciidoc b/docs/doc_examples/7fe2179705304af5e87eb382dca6235a.asciidoc new file mode 100644 index 000000000..7aa2b01be --- /dev/null +++ b/docs/doc_examples/7fe2179705304af5e87eb382dca6235a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.open({ + index: "logs-my_app-default", +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fe9f0a583e079f7fc6fd64d12b6e9e5.asciidoc b/docs/doc_examples/7fe9f0a583e079f7fc6fd64d12b6e9e5.asciidoc new file mode 100644 index 000000000..6e49e53e5 --- /dev/null +++ b/docs/doc_examples/7fe9f0a583e079f7fc6fd64d12b6e9e5.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "price.weighted": { + type: "double", + script: + "\n double price = doc['price'].value;\n if (doc['promoted'].value) {\n price *= 0.8;\n }\n emit(price);\n ", + }, + }, + query: { + constant_score: { + filter: { + match: { + type: "hat", + }, + }, + }, + }, + aggs: { + hat_prices: { + sum: { + field: "price.weighted", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7fef68840761c6982c14ad7af96caf37.asciidoc b/docs/doc_examples/7fef68840761c6982c14ad7af96caf37.asciidoc new file mode 100644 index 
000000000..88a14a284 --- /dev/null +++ b/docs/doc_examples/7fef68840761c6982c14ad7af96caf37.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + group: "fans", + user: [ + { + first: "John", + last: "Smith", + }, + { + first: "Alice", + last: "White", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/7ff4124df0541ee2496034004f4146d4.asciidoc b/docs/doc_examples/7ff4124df0541ee2496034004f4146d4.asciidoc new file mode 100644 index 000000000..cc4fcf208 --- /dev/null +++ b/docs/doc_examples/7ff4124df0541ee2496034004f4146d4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + tags: { + type: "keyword", + eager_global_ordinals: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/800861c15bb33ca01a46fb97dde7537a.asciidoc b/docs/doc_examples/800861c15bb33ca01a46fb97dde7537a.asciidoc new file mode 100644 index 000000000..30819bdc6 --- /dev/null +++ b/docs/doc_examples/800861c15bb33ca01a46fb97dde7537a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getFilters({ + filter_id: "safe_domains", +}); +console.log(response); +---- diff --git a/docs/doc_examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc b/docs/doc_examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc new file mode 100644 index 000000000..bb8174e35 --- /dev/null +++ b/docs/doc_examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information", + processors: [ + { + attachment: { + field: "data", + indexed_chars: 11, + indexed_chars_field: "max_size", + remove_binary: true, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id_2", + pipeline: "attachment", + document: { + data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + max_size: 5, + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id_2", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/8022e6a690344035b6472a43a9d122e0.asciidoc b/docs/doc_examples/8022e6a690344035b6472a43a9d122e0.asciidoc deleted file mode 100644 index eb8025f4e..000000000 --- a/docs/doc_examples/8022e6a690344035b6472a43a9d122e0.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: '_all', - q: 'user:kimchy' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/803bbc14fbec0e49dfed9fab49c8a7f8.asciidoc b/docs/doc_examples/803bbc14fbec0e49dfed9fab49c8a7f8.asciidoc new file mode 100644 index 000000000..1821e0de4 --- /dev/null +++ 
b/docs/doc_examples/803bbc14fbec0e49dfed9fab49c8a7f8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + full_text: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/804a97ff4d0613e6568e4efb19c52021.asciidoc b/docs/doc_examples/804a97ff4d0613e6568e4efb19c52021.asciidoc deleted file mode 100644 index fed037571..000000000 --- a/docs/doc_examples/804a97ff4d0613e6568e4efb19c52021.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.cluster.putSettings({ - body: { - persistent: { - 'action.auto_create_index': 'twitter,index10,-index1*,+ind*' - } - } -}) -console.log(response0) - -const response1 = await client.cluster.putSettings({ - body: { - persistent: { - 'action.auto_create_index': 'false' - } - } -}) -console.log(response1) - -const response2 = await client.cluster.putSettings({ - body: { - persistent: { - 'action.auto_create_index': 'true' - } - } -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/8051766cadded0892290bc2cc06e145c.asciidoc b/docs/doc_examples/8051766cadded0892290bc2cc06e145c.asciidoc new file mode 100644 index 000000000..c4522e245 --- /dev/null +++ b/docs/doc_examples/8051766cadded0892290bc2cc06e145c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.ackWatch({ + watch_id: "my_watch", + action_id: "action1,action2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/805f5550b90e75aa5cc82b90d8c6c242.asciidoc b/docs/doc_examples/805f5550b90e75aa5cc82b90d8c6c242.asciidoc new file mode 100644 index 000000000..409f1ce03 --- /dev/null +++ b/docs/doc_examples/805f5550b90e75aa5cc82b90d8c6c242.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "news", + query: { + match: { + content: "elasticsearch", + }, + }, + aggs: { + sample: { + sampler: { + shard_size: 100, + }, + aggs: { + keywords: { + significant_text: { + field: "content", + filter_duplicate_text: true, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/807c0c9763f8c1114b3c8278c2a0cb56.asciidoc b/docs/doc_examples/807c0c9763f8c1114b3c8278c2a0cb56.asciidoc new file mode 100644 index 000000000..b10488464 --- /dev/null +++ b/docs/doc_examples/807c0c9763f8c1114b3c8278c2a0cb56.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + all_of: { + ordered: true, + intervals: [ + { + match: { + query: "my favorite food", + max_gaps: 0, + ordered: true, + }, + }, + { + any_of: { + intervals: [ + { + match: { + query: "hot water", + }, + }, + { + match: { + query: "cold porridge", + }, + }, + ], + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc b/docs/doc_examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc new file mode 100644 index 000000000..2904d2dc7 --- /dev/null +++ b/docs/doc_examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_watcher/settings", + body: { + "index.routing.allocation.include.role": "watcher", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/808f4db1e2361be77dd6816c1f818139.asciidoc b/docs/doc_examples/808f4db1e2361be77dd6816c1f818139.asciidoc new file mode 100644 index 000000000..83b3a9af5 --- /dev/null +++ b/docs/doc_examples/808f4db1e2361be77dd6816c1f818139.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/80dbaf28d1976dc00de3fe2018067e81.asciidoc b/docs/doc_examples/80dbaf28d1976dc00de3fe2018067e81.asciidoc new file mode 100644 index 000000000..c74de8b5f --- /dev/null +++ b/docs/doc_examples/80dbaf28d1976dc00de3fe2018067e81.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteTemplate({ + name: ".cloud-hot-warm-allocation-0", +}); +console.log(response); +---- diff --git a/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc new file mode 100644 index 000000000..3e12f095c --- /dev/null +++ b/docs/doc_examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkUpdateApiKeys({ + ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], + role_descriptors: { + "role-a": { + indices: [ + { + names: ["*"], + privileges: ["write"], + }, + ], + }, + }, + metadata: { + environment: { + level: 2, + trusted: true, + tags: ["production"], + }, + }, + expiration: "30d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc b/docs/doc_examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc new file mode 100644 index 000000000..ad1cec287 --- /dev/null +++ b/docs/doc_examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.getSynonymRule({ + set_id: "my-synonyms-set", + rule_id: "test-1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/812a3d7ab461d74efd9136aaf4bcf11c.asciidoc b/docs/doc_examples/812a3d7ab461d74efd9136aaf4bcf11c.asciidoc new file mode 100644 index 000000000..4bea906fc --- /dev/null +++ b/docs/doc_examples/812a3d7ab461d74efd9136aaf4bcf11c.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + 
index: "range_index", + size: 0, + aggs: { + range_histo: { + histogram: { + field: "expected_attendees", + interval: 5, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/812deb6b7668c7444f3b99d843d2adc1.asciidoc b/docs/doc_examples/812deb6b7668c7444f3b99d843d2adc1.asciidoc new file mode 100644 index 000000000..62f019cdf --- /dev/null +++ b/docs/doc_examples/812deb6b7668c7444f3b99d843d2adc1.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "shapes", + mappings: { + properties: { + geometry: { + type: "shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "shapes", + id: "footprint", + document: { + geometry: { + type: "envelope", + coordinates: [ + [1355, 5355], + [1400, 5200], + ], + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "example", + query: { + shape: { + geometry: { + indexed_shape: { + index: "shapes", + id: "footprint", + path: "geometry", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc b/docs/doc_examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc new file mode 100644 index 000000000..796fe255d --- /dev/null +++ b/docs/doc_examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence by process.pid\n [ file where file.name == "cmd.exe" and process.pid != 2013 ]\n [ process where stringContains(process.executable, "regsvr32") ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc b/docs/doc_examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc new file mode 100644 index 000000000..814d8e865 --- /dev/null +++ b/docs/doc_examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.count({ + index: "my-index-000001", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/81612c2537386e031b7eb604f6756a71.asciidoc b/docs/doc_examples/81612c2537386e031b7eb604f6756a71.asciidoc new file mode 100644 index 000000000..ae7191527 --- /dev/null +++ b/docs/doc_examples/81612c2537386e031b7eb604f6756a71.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clone({ + index: "my_source_index", + target: "my_target_index", + settings: { + "index.number_of_shards": 5, + }, + aliases: { + my_search_indices: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8194f1fae6aa72ab91ea559daad932d4.asciidoc b/docs/doc_examples/8194f1fae6aa72ab91ea559daad932d4.asciidoc new file mode 100644 index 000000000..377f726ec --- /dev/null +++ b/docs/doc_examples/8194f1fae6aa72ab91ea559daad932d4.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- 
+const response = await client.search({ + index: "my-index-000001", + max_concurrent_shard_requests: 3, + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/819e00cc6547d925d80090b94e0650d7.asciidoc b/docs/doc_examples/819e00cc6547d925d80090b94e0650d7.asciidoc new file mode 100644 index 000000000..9e33adbad --- /dev/null +++ b/docs/doc_examples/819e00cc6547d925d80090b94e0650d7.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: + "my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/81aad155ff23b1b396833b1182c9d46b.asciidoc b/docs/doc_examples/81aad155ff23b1b396833b1182c9d46b.asciidoc new file mode 100644 index 000000000..a4744544e --- /dev/null +++ b/docs/doc_examples/81aad155ff23b1b396833b1182c9d46b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", +}); +console.log(response); + +const response1 = await client.cat.recovery({ + v: "true", + active_only: "true", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/81c7a392efd505b686eed978fb7d9d17.asciidoc b/docs/doc_examples/81c7a392efd505b686eed978fb7d9d17.asciidoc new file mode 100644 index 000000000..d516dced4 --- /dev/null +++ b/docs/doc_examples/81c7a392efd505b686eed978fb7d9d17.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "english_example", + settings: { + analysis: { + filter: { + english_stop: { + type: "stop", + stopwords: "_english_", + }, + english_keywords: { + type: "keyword_marker", + keywords: ["example"], + }, + english_stemmer: { + type: "stemmer", + language: "english", + }, + english_possessive_stemmer: { + type: "stemmer", + language: "possessive_english", + }, + }, + analyzer: { + rebuilt_english: { + tokenizer: "standard", + filter: [ + "english_possessive_stemmer", + "lowercase", + "english_stop", + "english_keywords", + "english_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/81c9aa2678d6166a9662ddf2c011a6a5.asciidoc b/docs/doc_examples/81c9aa2678d6166a9662ddf2c011a6a5.asciidoc deleted file mode 100644 index de99a4a47..000000000 --- a/docs/doc_examples/81c9aa2678d6166a9662ddf2c011a6a5.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match_none: {} - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/81ee2ad368208c4c78098292547b0577.asciidoc b/docs/doc_examples/81ee2ad368208c4c78098292547b0577.asciidoc new file mode 100644 index 000000000..f7aa01327 --- /dev/null +++ b/docs/doc_examples/81ee2ad368208c4c78098292547b0577.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate 
the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "admin_user", + roles: ["monitoring"], + rules: { + field: { + dn: "cn=Admin,ou=example,o=com", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/81ef5774355180fc44d2a52b5182d24a.asciidoc b/docs/doc_examples/81ef5774355180fc44d2a52b5182d24a.asciidoc new file mode 100644 index 000000000..2a175137a --- /dev/null +++ b/docs/doc_examples/81ef5774355180fc44d2a52b5182d24a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + message_stats: { + string_stats: { + field: "message.keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/81f1b1e1d5c81683b6bf471c469e6046.asciidoc b/docs/doc_examples/81f1b1e1d5c81683b6bf471c469e6046.asciidoc new file mode 100644 index 000000000..a16d759b9 --- /dev/null +++ b/docs/doc_examples/81f1b1e1d5c81683b6bf471c469e6046.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "shirts", + query: { + bool: { + filter: [ + { + term: { + color: "red", + }, + }, + { + term: { + brand: "gucci", + }, + }, + ], + }, + }, + aggs: { + models: { + terms: { + field: "model", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8206a7cc615ad93fec322513b8fdd4fd.asciidoc b/docs/doc_examples/8206a7cc615ad93fec322513b8fdd4fd.asciidoc new file mode 100644 index 000000000..7b2517ea2 --- /dev/null +++ b/docs/doc_examples/8206a7cc615ad93fec322513b8fdd4fd.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "job-candidates", + id: 2, + refresh: "true", + document: { + name: "Jason Response", + programming_languages: ["java", "php"], + required_matches: 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/820f689eaaef15fc07abd1073fa880f8.asciidoc b/docs/doc_examples/820f689eaaef15fc07abd1073fa880f8.asciidoc new file mode 100644 index 000000000..7882ecb00 --- /dev/null +++ b/docs/doc_examples/820f689eaaef15fc07abd1073fa880f8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + from: 5, + size: 20, + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc b/docs/doc_examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc new file mode 100644 index 000000000..3e052ea88 --- /dev/null +++ b/docs/doc_examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteTrainedModelAlias({ + model_id: "flight-delay-prediction-1574775339910", + model_alias: "flight_delay_model", +}); +console.log(response); +---- diff --git a/docs/doc_examples/821ac598f5f4a795a13f8dd0c0c4d8d6.asciidoc 
b/docs/doc_examples/821ac598f5f4a795a13f8dd0c0c4d8d6.asciidoc new file mode 100644 index 000000000..463b34a82 --- /dev/null +++ b/docs/doc_examples/821ac598f5f4a795a13f8dd0c0c4d8d6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createDataStream({ + name: "metrics-weather_sensors-dev", +}); +console.log(response); +---- diff --git a/docs/doc_examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc b/docs/doc_examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc new file mode 100644 index 000000000..e01c1c4ce --- /dev/null +++ b/docs/doc_examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.resumeFollow({ + index: "follower_index", + max_read_request_operation_count: 1024, + max_outstanding_read_requests: 16, + max_read_request_size: "1024k", + max_write_request_operation_count: 32768, + max_write_request_size: "16k", + max_outstanding_write_requests: 8, + max_write_buffer_count: 512, + max_write_buffer_size: "512k", + max_retry_delay: "10s", + read_poll_timeout: "30s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/827b7e9308ea288f18aea00a5accc38e.asciidoc b/docs/doc_examples/827b7e9308ea288f18aea00a5accc38e.asciidoc new file mode 100644 index 000000000..dd4323962 --- /dev/null +++ b/docs/doc_examples/827b7e9308ea288f18aea00a5accc38e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getComponentTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc b/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc new file mode 100644 index 000000000..07d04a9b1 --- /dev/null +++ b/docs/doc_examples/82844ef45e11c0eece100d3109db3182.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "completion", + inference_id: "amazon_bedrock_completion", + inference_config: { + service: "amazonbedrock", + service_settings: { + access_key: "", + secret_key: "", + region: "us-east-1", + provider: "amazontitan", + model: "amazon.titan-text-premier-v1:0", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/828f0045747fde4888a947bb99e190e3.asciidoc b/docs/doc_examples/828f0045747fde4888a947bb99e190e3.asciidoc new file mode 100644 index 000000000..a4e4969f9 --- /dev/null +++ b/docs/doc_examples/828f0045747fde4888a947bb99e190e3.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "movies", + retriever: { + rule: { + match_criteria: { + query_string: "harry potter", + }, + ruleset_ids: ["my-ruleset"], + retriever: { + standard: { + query: { + query_string: { + query: "harry potter", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/829a40d484c778a8c58340c7bf09e1d8.asciidoc 
b/docs/doc_examples/829a40d484c778a8c58340c7bf09e1d8.asciidoc new file mode 100644 index 000000000..8d6320e7a --- /dev/null +++ b/docs/doc_examples/829a40d484c778a8c58340c7bf09e1d8.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + operator: "or", + query: "the quick brown", + }, + }, + }, + rescore: { + window_size: 50, + query: { + rescore_query: { + match_phrase: { + message: { + query: "the quick brown", + slop: 2, + }, + }, + }, + query_weight: 0.7, + rescore_query_weight: 1.2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc b/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc new file mode 100644 index 000000000..c693e38f1 --- /dev/null +++ b/docs/doc_examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.chatCompletionUnified({ + inference_id: "openai-completion", + chat_completion_request: { + messages: [ + { + role: "assistant", + content: "Let's find out what the weather is", + tool_calls: [ + { + id: "call_KcAjWtAww20AihPHphUh46Gd", + type: "function", + function: { + name: "get_current_weather", + arguments: '{"location":"Boston, MA"}', + }, + }, + ], + }, + { + role: "tool", + content: "The weather is cold", + tool_call_id: "call_KcAjWtAww20AihPHphUh46Gd", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/82d6de3081de7b0664f44adf2942675a.asciidoc b/docs/doc_examples/82d6de3081de7b0664f44adf2942675a.asciidoc new file mode 100644 index 000000000..429967543 --- /dev/null +++ b/docs/doc_examples/82d6de3081de7b0664f44adf2942675a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.getBehavioralAnalytics({ + name: "my_analytics_collection", +}); +console.log(response); +---- diff --git a/docs/doc_examples/82e94b6cdf65e324575f916b3776b779.asciidoc b/docs/doc_examples/82e94b6cdf65e324575f916b3776b779.asciidoc new file mode 100644 index 000000000..a6c307c15 --- /dev/null +++ b/docs/doc_examples/82e94b6cdf65e324575f916b3776b779.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + strings_as_keywords: { + match_mapping_type: "string", + runtime: {}, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83062a543163370328cf2e21a68c1bd3.asciidoc b/docs/doc_examples/83062a543163370328cf2e21a68c1bd3.asciidoc new file mode 100644 index 000000000..71645d3b6 --- /dev/null +++ b/docs/doc_examples/83062a543163370328cf2e21a68c1bd3.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + delete: { + actions: { + wait_for_snapshot: { + policy: "slm-policy-name", + }, + 
}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/831f65d700577e11112c711236110f61.asciidoc b/docs/doc_examples/831f65d700577e11112c711236110f61.asciidoc new file mode 100644 index 000000000..701142ed9 --- /dev/null +++ b/docs/doc_examples/831f65d700577e11112c711236110f61.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_email_analyzer: { + type: "pattern", + pattern: "\\W|_", + lowercase: true, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_email_analyzer", + text: "John_Smith@foo-bar.com", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/8330b2ea6317769e52d0647ba434b354.asciidoc b/docs/doc_examples/8330b2ea6317769e52d0647ba434b354.asciidoc new file mode 100644 index 000000000..3692a9350 --- /dev/null +++ b/docs/doc_examples/8330b2ea6317769e52d0647ba434b354.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + routing: "key1", + docs: [ + { + _index: "test", + _id: "1", + routing: "key2", + }, + { + _index: "test", + _id: "2", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8345d2615f43a934fe1871a5120eca1d.asciidoc b/docs/doc_examples/8345d2615f43a934fe1871a5120eca1d.asciidoc new file mode 100644 index 000000000..e9fab2a15 --- /dev/null +++ b/docs/doc_examples/8345d2615f43a934fe1871a5120eca1d.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + query: { + bool: { + filter: { + term: { + currency: "EUR", + }, + }, + }, + }, + }, + pivot: { + group_by: { + customer_id: { + terms: { + field: "customer_id", + }, + }, + }, + aggregations: { + "total_quantity.sum": { + sum: { + field: "total_quantity", + }, + }, + "taxless_total_price.sum": { + sum: { + field: "taxless_total_price", + }, + }, + "total_quantity.max": { + max: { + field: "total_quantity", + }, + }, + "order_id.cardinality": { + cardinality: { + field: "order_id", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/834764b2fba6cbb41eaabd740be75656.asciidoc b/docs/doc_examples/834764b2fba6cbb41eaabd740be75656.asciidoc new file mode 100644 index 000000000..2b286e62b --- /dev/null +++ b/docs/doc_examples/834764b2fba6cbb41eaabd740be75656.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_analyzer: { + tokenizer: "standard", + filter: ["keyword_repeat", "porter_stem", "remove_duplicates"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8357aa6099089940589ae3e97e7bcffa.asciidoc b/docs/doc_examples/8357aa6099089940589ae3e97e7bcffa.asciidoc new file mode 100644 index 000000000..aa9ff97c5 --- /dev/null +++ 
b/docs/doc_examples/8357aa6099089940589ae3e97e7bcffa.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream(); +console.log(response); +---- diff --git a/docs/doc_examples/83780c8f5f17eb21064c1ba6e0a7aa10.asciidoc b/docs/doc_examples/83780c8f5f17eb21064c1ba6e0a7aa10.asciidoc new file mode 100644 index 000000000..b31cf25db --- /dev/null +++ b/docs/doc_examples/83780c8f5f17eb21064c1ba6e0a7aa10.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + wrapper: { + query: "eyJ0ZXJtIiA6IHsgInVzZXIuaWQiIDogImtpbWNoeSIgfX0=", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/838a4eabebba4c06100fb37dc30c7722.asciidoc b/docs/doc_examples/838a4eabebba4c06100fb37dc30c7722.asciidoc new file mode 100644 index 000000000..6ff988112 --- /dev/null +++ b/docs/doc_examples/838a4eabebba4c06100fb37dc30c7722.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.putJob({ + id: "sensor", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "*/30 * * * * ?", + page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "1h", + delay: "7d", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/839710129a165cf93c6e329abedf9089.asciidoc b/docs/doc_examples/839710129a165cf93c6e329abedf9089.asciidoc new file mode 100644 index 000000000..e21691205 --- /dev/null +++ b/docs/doc_examples/839710129a165cf93c6e329abedf9089.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_security/cross_cluster/api_key", + body: { + name: "my-cross-cluster-api-key", + access: { + search: [ + { + names: ["logs*"], + }, + ], + }, + metadata: { + application: "search", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/839a4b2930856790e34cc9dfeb983284.asciidoc b/docs/doc_examples/839a4b2930856790e34cc9dfeb983284.asciidoc new file mode 100644 index 000000000..4c5a97c46 --- /dev/null +++ b/docs/doc_examples/839a4b2930856790e34cc9dfeb983284.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + downsample: { + fixed_interval: "1h", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc b/docs/doc_examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc new file mode 100644 index 000000000..7cfc7762b --- /dev/null +++ b/docs/doc_examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` 
to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "<my-index-{now/d}-000001>", + aliases: { + "my-write-alias": {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83cd4eb89818b4c32f654d370eafa920.asciidoc b/docs/doc_examples/83cd4eb89818b4c32f654d370eafa920.asciidoc new file mode 100644 index 000000000..d06f00c1d --- /dev/null +++ b/docs/doc_examples/83cd4eb89818b4c32f654d370eafa920.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "keep_types", + types: ["<NUMBER>"], + }, + ], + text: "1 quick fox 2 lazy dogs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/83d712b9ffb2e703212b762eba3c521a.asciidoc b/docs/doc_examples/83d712b9ffb2e703212b762eba3c521a.asciidoc new file mode 100644 index 000000000..e386b454b --- /dev/null +++ b/docs/doc_examples/83d712b9ffb2e703212b762eba3c521a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-alias", + ignore_unavailable: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/83d8c920460a12f87b9d5bf65515c367.asciidoc b/docs/doc_examples/83d8c920460a12f87b9d5bf65515c367.asciidoc new file mode 100644 index 000000000..3c1edaaca --- /dev/null +++ b/docs/doc_examples/83d8c920460a12f87b9d5bf65515c367.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_moving_sum: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: + "MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83dd715e45a5da097123c6d10f22f8f4.asciidoc b/docs/doc_examples/83dd715e45a5da097123c6d10f22f8f4.asciidoc new file mode 100644 index 000000000..ee691348e --- /dev/null +++ b/docs/doc_examples/83dd715e45a5da097123c6d10f22f8f4.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_containing: { + little: { + span_term: { + field1: "foo", + }, + }, + big: { + span_near: { + clauses: [ + { + span_term: { + field1: "bar", + }, + }, + { + span_term: { + field1: "baz", + }, + }, + ], + slop: 5, + in_order: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/83dfd0852101eca3ba8174c9c38b4e73.asciidoc b/docs/doc_examples/83dfd0852101eca3ba8174c9c38b4e73.asciidoc new file mode 100644 index 000000000..e7f4dc8b5 --- /dev/null +++ b/docs/doc_examples/83dfd0852101eca3ba8174c9c38b4e73.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getTemplate({ + name: ".monitoring-*", +}); +console.log(response); +---- diff --git
a/docs/doc_examples/840b6c5c3d9c56aed854cfab8da04486.asciidoc b/docs/doc_examples/840b6c5c3d9c56aed854cfab8da04486.asciidoc new file mode 100644 index 000000000..cda364d7e --- /dev/null +++ b/docs/doc_examples/840b6c5c3d9c56aed854cfab8da04486.asciidoc @@ -0,0 +1,95 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "file-path-test", + settings: { + analysis: { + analyzer: { + custom_path_tree: { + tokenizer: "custom_hierarchy", + }, + custom_path_tree_reversed: { + tokenizer: "custom_hierarchy_reversed", + }, + }, + tokenizer: { + custom_hierarchy: { + type: "path_hierarchy", + delimiter: "/", + }, + custom_hierarchy_reversed: { + type: "path_hierarchy", + delimiter: "/", + reverse: "true", + }, + }, + }, + }, + mappings: { + properties: { + file_path: { + type: "text", + fields: { + tree: { + type: "text", + analyzer: "custom_path_tree", + }, + tree_reversed: { + type: "text", + analyzer: "custom_path_tree_reversed", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "file-path-test", + id: 1, + document: { + file_path: "/User/alice/photos/2017/05/16/my_photo1.jpg", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "file-path-test", + id: 2, + document: { + file_path: "/User/alice/photos/2017/05/16/my_photo2.jpg", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "file-path-test", + id: 3, + document: { + file_path: "/User/alice/photos/2017/05/16/my_photo3.jpg", + }, +}); +console.log(response3); + +const response4 = await client.index({ + index: "file-path-test", + id: 4, + document: { + file_path: "/User/alice/photos/2017/05/15/my_photo1.jpg", + }, +}); +console.log(response4); + +const response5 = await client.index({ + index: "file-path-test", + id: 5, + document: { + file_path: "/User/bob/photos/2017/05/16/my_photo1.jpg", + }, +}); +console.log(response5); +---- diff --git a/docs/doc_examples/84108653e9e03b4edacd878ec870df77.asciidoc b/docs/doc_examples/84108653e9e03b4edacd878ec870df77.asciidoc new file mode 100644 index 000000000..23119d33f --- /dev/null +++ b/docs/doc_examples/84108653e9e03b4edacd878ec870df77.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "hungarian_example", + settings: { + analysis: { + filter: { + hungarian_stop: { + type: "stop", + stopwords: "_hungarian_", + }, + hungarian_keywords: { + type: "keyword_marker", + keywords: ["példa"], + }, + hungarian_stemmer: { + type: "stemmer", + language: "hungarian", + }, + }, + analyzer: { + rebuilt_hungarian: { + tokenizer: "standard", + filter: [ + "lowercase", + "hungarian_stop", + "hungarian_keywords", + "hungarian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc b/docs/doc_examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc new file mode 100644 index 000000000..12610d1a5 --- /dev/null +++ b/docs/doc_examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: 
".watcher-history*", + pretty: "true", + query: { + bool: { + must: [ + { + match: { + "result.condition.met": true, + }, + }, + { + range: { + "result.execution_time": { + gte: "now-10s", + }, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/841ad0a70f4271f61f0bac0b467b59c5.asciidoc b/docs/doc_examples/841ad0a70f4271f61f0bac0b467b59c5.asciidoc new file mode 100644 index 000000000..7848732d8 --- /dev/null +++ b/docs/doc_examples/841ad0a70f4271f61f0bac0b467b59c5.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mtermvectors({ + index: "my-index-000001", + docs: [ + { + _id: "2", + fields: ["message"], + term_statistics: true, + }, + { + _id: "1", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/841d8b766902c8e3ae85c228a31383ac.asciidoc b/docs/doc_examples/841d8b766902c8e3ae85c228a31383ac.asciidoc new file mode 100644 index 000000000..bf5b9ae06 --- /dev/null +++ b/docs/doc_examples/841d8b766902c8e3ae85c228a31383ac.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsyncStatus({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc b/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc new file mode 100644 index 000000000..ae893a16e --- /dev/null +++ b/docs/doc_examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.repositoryVerifyIntegrity({ + name: "my_repository", +}); +console.log(response); +---- diff --git a/docs/doc_examples/84243213614fe64930b1d430704afb29.asciidoc b/docs/doc_examples/84243213614fe64930b1d430704afb29.asciidoc new file mode 100644 index 000000000..3e58c1614 --- /dev/null +++ b/docs/doc_examples/84243213614fe64930b1d430704afb29.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + voltage_corrected: { + type: "double", + script: { + source: + "\n emit(doc['voltage'].value * params['multiplier'])\n ", + params: { + multiplier: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/84465de841fe5c6099a0382f786f2cb8.asciidoc b/docs/doc_examples/84465de841fe5c6099a0382f786f2cb8.asciidoc new file mode 100644 index 000000000..230633e38 --- /dev/null +++ b/docs/doc_examples/84465de841fe5c6099a0382f786f2cb8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + remove: { + index: "logs-nginx.access-prod", + alias: "logs", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8478c39c71bbb559ef6ab919f918f22b.asciidoc b/docs/doc_examples/8478c39c71bbb559ef6ab919f918f22b.asciidoc new file mode 100644 index 
000000000..4bd021e11 --- /dev/null +++ b/docs/doc_examples/8478c39c71bbb559ef6ab919f918f22b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + filter: { + range: { + "@timestamp": { + gte: "now-1d/d", + lt: "now/d", + }, + }, + }, + query: + '\n file where (file.type == "file" and file.name == "cmd.exe")\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/8494d09c39e109a012094eb9d6ec52ac.asciidoc b/docs/doc_examples/8494d09c39e109a012094eb9d6ec52ac.asciidoc new file mode 100644 index 000000000..66286fa8f --- /dev/null +++ b/docs/doc_examples/8494d09c39e109a012094eb9d6ec52ac.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "pipelineA", + description: "inner pipeline", + processors: [ + { + set: { + field: "inner_pipeline_set", + value: "inner", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/84c61160ca815e29e9973ba1380219dd.asciidoc b/docs/doc_examples/84c61160ca815e29e9973ba1380219dd.asciidoc new file mode 100644 index 000000000..ef2135f43 --- /dev/null +++ b/docs/doc_examples/84c61160ca815e29e9973ba1380219dd.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchableSnapshots.stats({ + index: "my-index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/84c69fb07050f0e89720007a6507a221.asciidoc b/docs/doc_examples/84c69fb07050f0e89720007a6507a221.asciidoc new file mode 100644 index 000000000..902a23594 --- /dev/null +++ b/docs/doc_examples/84c69fb07050f0e89720007a6507a221.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ + task_id: "oTUltX4IQMOUUVeiohTt8A:464", +}); +console.log(response); +---- diff --git a/docs/doc_examples/84e2cf7417c9e0c9e6f3c23031001440.asciidoc b/docs/doc_examples/84e2cf7417c9e0c9e6f3c23031001440.asciidoc new file mode 100644 index 000000000..06916ca58 --- /dev/null +++ b/docs/doc_examples/84e2cf7417c9e0c9e6f3c23031001440.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.stats(); +console.log(response); +---- diff --git a/docs/doc_examples/84edb44c5b74426f448b2baa101092d6.asciidoc b/docs/doc_examples/84edb44c5b74426f448b2baa101092d6.asciidoc new file mode 100644 index 000000000..0de7ea182 --- /dev/null +++ b/docs/doc_examples/84edb44c5b74426f448b2baa101092d6.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "range_index", + query: { + term: { + expected_attendees: { + value: 12, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc b/docs/doc_examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc new file mode 100644 index 
000000000..5d2ef1d8d --- /dev/null +++ b/docs/doc_examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + term: { + "author.keyword": "Maria Rodriguez", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/84f2f0cea90340bdd041421afdb58ec3.asciidoc b/docs/doc_examples/84f2f0cea90340bdd041421afdb58ec3.asciidoc new file mode 100644 index 000000000..13c112515 --- /dev/null +++ b/docs/doc_examples/84f2f0cea90340bdd041421afdb58ec3.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index1", + mappings: { + properties: { + comment: { + type: "text", + analyzer: "standard", + fields: { + english: { + type: "text", + analyzer: "english", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/84f3e8524f6ff80e870c03ab71551538.asciidoc b/docs/doc_examples/84f3e8524f6ff80e870c03ab71551538.asciidoc new file mode 100644 index 000000000..4859db54a --- /dev/null +++ b/docs/doc_examples/84f3e8524f6ff80e870c03ab71551538.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + preference: "my-custom-shard-string", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/850bfd0a00d32475a54ac7f87fb4cc4d.asciidoc b/docs/doc_examples/850bfd0a00d32475a54ac7f87fb4cc4d.asciidoc new file mode 100644 index 000000000..bf886d598 --- /dev/null +++ b/docs/doc_examples/850bfd0a00d32475a54ac7f87fb4cc4d.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + runtime_mappings: { + "measures.voltage": { + type: "double", + script: { + source: + "if (doc['model_number.keyword'].value.equals('HG537PU'))\n {emit(1.7 * params._source['measures']['voltage']);}\n else{emit(params._source['measures']['voltage']);}", + }, + }, + }, + query: { + match: { + model_number: "HG537PU", + }, + }, + fields: ["measures.voltage"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/851f9754dbefc099c54c5423ca4565c0.asciidoc b/docs/doc_examples/851f9754dbefc099c54c5423ca4565c0.asciidoc new file mode 100644 index 000000000..9682a809d --- /dev/null +++ b/docs/doc_examples/851f9754dbefc099c54c5423ca4565c0.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "network-traffic", + size: 0, + aggs: { + "ipv6-subnets": { + ip_prefix: { + field: "ipv6", + prefix_length: 64, + is_ipv6: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/852b394d78b8c79ee0055b5501981a4b.asciidoc b/docs/doc_examples/852b394d78b8c79ee0055b5501981a4b.asciidoc new file mode 100644 index 000000000..9dbde19b3 --- /dev/null +++ 
b/docs/doc_examples/852b394d78b8c79ee0055b5501981a4b.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + product_name: { + terms: { + field: "product", + missing_bucket: true, + missing_order: "last", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc b/docs/doc_examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc new file mode 100644 index 000000000..0ca3674f6 --- /dev/null +++ b/docs/doc_examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "movies", + retriever: { + rule: { + match_criteria: { + query_string: "harry potter", + }, + ruleset_ids: ["my-ruleset"], + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "sorcerer's stone", + }, + }, + }, + }, + { + standard: { + query: { + query_string: { + query: "chamber of secrets", + }, + }, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/85479e02af00681210e17e3d0ff51e21.asciidoc b/docs/doc_examples/85479e02af00681210e17e3d0ff51e21.asciidoc new file mode 100644 index 000000000..cbe5e40f7 --- /dev/null +++ b/docs/doc_examples/85479e02af00681210e17e3d0ff51e21.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + date: { + type: "date", + format: "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/85519a614ae18c998986d46bbad82b76.asciidoc b/docs/doc_examples/85519a614ae18c998986d46bbad82b76.asciidoc new file mode 100644 index 000000000..bd8e5bc57 --- /dev/null +++ b/docs/doc_examples/85519a614ae18c998986d46bbad82b76.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my_template", + index_patterns: ["test-*"], + template: { + settings: { + number_of_shards: 1, + number_of_replicas: 1, + "index.lifecycle.name": "my_policy", + "index.lifecycle.rollover_alias": "test-alias", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc b/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc new file mode 100644 index 000000000..dad0bf4d5 --- /dev/null +++ b/docs/doc_examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "mistral_embeddings", + inference_config: { + service: "mistral", + service_settings: { + api_key: "<api_key>", + model: "<model>", + }, + }, +}); +console.log(response); +---- diff --git
a/docs/doc_examples/856c10ad554c26b70f1121454caff40a.asciidoc b/docs/doc_examples/856c10ad554c26b70f1121454caff40a.asciidoc new file mode 100644 index 000000000..561da085c --- /dev/null +++ b/docs/doc_examples/856c10ad554c26b70f1121454caff40a.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "byte-image-index", + knn: { + field: "byte-image-vector", + query_vector: "fb09", + k: 10, + num_candidates: 100, + }, + fields: ["title"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc b/docs/doc_examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc new file mode 100644 index 000000000..ec8e3ffd4 --- /dev/null +++ b/docs/doc_examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.disk.watermark.low": "90%", + "cluster.routing.allocation.disk.watermark.low.max_headroom": "100GB", + "cluster.routing.allocation.disk.watermark.high": "95%", + "cluster.routing.allocation.disk.watermark.high.max_headroom": "20GB", + "cluster.routing.allocation.disk.watermark.flood_stage": "97%", + "cluster.routing.allocation.disk.watermark.flood_stage.max_headroom": "5GB", + "cluster.routing.allocation.disk.watermark.flood_stage.frozen": "97%", + "cluster.routing.allocation.disk.watermark.flood_stage.frozen.max_headroom": + "5GB", + }, +}); +console.log(response); + +const response1 = await client.indices.putSettings({ + index: "*", + expand_wildcards: "all", + settings: { + "index.blocks.read_only_allow_delete": null, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/858fde15fb0a0340873b123043f8c3b4.asciidoc b/docs/doc_examples/858fde15fb0a0340873b123043f8c3b4.asciidoc new file mode 100644 index 000000000..249008681 --- /dev/null +++ b/docs/doc_examples/858fde15fb0a0340873b123043f8c3b4.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_text: "histogram_1", + my_histogram: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + my_text: "histogram_2", + my_histogram: { + values: [0.1, 0.25, 0.35, 0.4, 0.45, 0.5], + counts: [8, 17, 8, 7, 6, 2], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/85ae90b63ecba9d2bad16144b054c0a1.asciidoc b/docs/doc_examples/85ae90b63ecba9d2bad16144b054c0a1.asciidoc new file mode 100644 index 000000000..f546521a4 --- /dev/null +++ b/docs/doc_examples/85ae90b63ecba9d2bad16144b054c0a1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + runtime_mappings: { + release_day_of_week: { + type: "keyword", + script: + "\n emit(doc['release_date'].value.dayOfWeekEnum.toString())\n ", + }, + }, + query: + "\n SELECT * FROM library WHERE page_count > 
300 AND author = 'Frank Herbert'\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/85d2e33791f1a74a69dfb04a60e69306.asciidoc b/docs/doc_examples/85d2e33791f1a74a69dfb04a60e69306.asciidoc new file mode 100644 index 000000000..3670bfbae --- /dev/null +++ b/docs/doc_examples/85d2e33791f1a74a69dfb04a60e69306.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "error_logs_alert", + metadata: { + color: "red", + }, + trigger: { + schedule: { + interval: "5m", + }, + }, + input: { + search: { + request: { + indices: "log-events", + body: { + size: 0, + query: { + match: { + status: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 5, + }, + }, + }, + actions: { + email_administrator: { + throttle_period: "15m", + email: { + to: "sys.admino@host.domain", + subject: "Encountered {{ctx.payload.hits.total}} errors", + body: "Too many error in the system, see attached data", + attachments: { + attached_data: { + data: { + format: "json", + }, + }, + }, + priority: "high", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc b/docs/doc_examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc new file mode 100644 index 000000000..dda69bb3e --- /dev/null +++ b/docs/doc_examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.migration.getFeatureUpgradeStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/85f0e5e8ab91ceab63c21dbedd9f4037.asciidoc b/docs/doc_examples/85f0e5e8ab91ceab63c21dbedd9f4037.asciidoc new file mode 100644 index 000000000..563f66b85 --- /dev/null +++ b/docs/doc_examples/85f0e5e8ab91ceab63c21dbedd9f4037.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "finnish_example", + settings: { + analysis: { + filter: { + finnish_stop: { + type: "stop", + stopwords: "_finnish_", + }, + finnish_keywords: { + type: "keyword_marker", + keywords: ["esimerkki"], + }, + finnish_stemmer: { + type: "stemmer", + language: "finnish", + }, + }, + analyzer: { + rebuilt_finnish: { + tokenizer: "standard", + filter: [ + "lowercase", + "finnish_stop", + "finnish_keywords", + "finnish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/85f2839beeb71edb66988e5c82188be0.asciidoc b/docs/doc_examples/85f2839beeb71edb66988e5c82188be0.asciidoc new file mode 100644 index 000000000..70e3a8299 --- /dev/null +++ b/docs/doc_examples/85f2839beeb71edb66988e5c82188be0.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.post({ + licenses: [ + { + uid: "893361dc-9749-4997-93cb-802e3d7fa4xx", + type: "basic", + issue_date_in_millis: 1411948800000, + expiry_date_in_millis: 1914278399999, + max_nodes: 1, + issued_to: "issuedTo", + issuer: "issuer", + signature: "xx", + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/85f6667f148d16d075493fddf07e2932.asciidoc b/docs/doc_examples/85f6667f148d16d075493fddf07e2932.asciidoc new file mode 100644 index 000000000..4b8133125 --- /dev/null +++ b/docs/doc_examples/85f6667f148d16d075493fddf07e2932.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: ".ds-my-data-stream-2099.03.07-000001", + }, + dest: { + index: "new-data-stream", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc b/docs/doc_examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc new file mode 100644 index 000000000..9bc60ea81 --- /dev/null +++ b/docs/doc_examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "use_existing_deployment", + inference_config: { + service: "elasticsearch", + service_settings: { + deployment_id: ".elser_model_2", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc b/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc new file mode 100644 index 000000000..48f850d0c --- /dev/null +++ b/docs/doc_examples/8619bd17bbfe33490b1f277007f654db.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "cohere-rerank", + inference_config: { + service: "cohere", + service_settings: { + api_key: "<API-KEY>", + model_id: "rerank-english-v3.0", + }, + task_settings: { + top_n: 10, + return_documents: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/861f5f61409dc87f3671293b87839ff7.asciidoc b/docs/doc_examples/861f5f61409dc87f3671293b87839ff7.asciidoc new file mode 100644 index 000000000..c9fcd3320 --- /dev/null +++ b/docs/doc_examples/861f5f61409dc87f3671293b87839ff7.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.stats({ + human: "true", + pretty: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc b/docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc new file mode 100644 index 000000000..7cb4b44d1 --- /dev/null +++ b/docs/doc_examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: ".reindexed-v9-ml-anomalies-custom-example", + settings: { + index: { + number_of_replicas: 0, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc b/docs/doc_examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc new file mode 100644 index 000000000..2d6d44e58 --- /dev/null +++ b/docs/doc_examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getJobs({ + job_id: "high_sum_total_sales", +}); +console.log(response); +---- diff --git a/docs/doc_examples/862907653d1c18d2e80eff7f421200e2.asciidoc b/docs/doc_examples/862907653d1c18d2e80eff7f421200e2.asciidoc new file mode 100644 index 000000000..f6b386605 --- /dev/null +++ b/docs/doc_examples/862907653d1c18d2e80eff7f421200e2.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "saml-example", + roles: ["example_role"], + enabled: true, + rules: { + field: { + "realm.name": "saml1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc b/docs/doc_examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc new file mode 100644 index 000000000..e3b853969 --- /dev/null +++ b/docs/doc_examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "indices.lifecycle.poll_interval": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8634c9993485d622fb12d24f4f242264.asciidoc b/docs/doc_examples/8634c9993485d622fb12d24f4f242264.asciidoc new file mode 100644 index 000000000..7a25d33e8 --- /dev/null +++ b/docs/doc_examples/8634c9993485d622fb12d24f4f242264.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.modifyDataStream({ + actions: [ + { + remove_backing_index: { + data_stream: "my-data-stream", + index: ".ds-my-data-stream-2023.07.26-000001", + }, + }, + { + add_backing_index: { + data_stream: "my-data-stream", + index: ".ds-my-data-stream-2023.07.26-000001-downsample", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8653e76676de5d327201b77512afa3a0.asciidoc b/docs/doc_examples/8653e76676de5d327201b77512afa3a0.asciidoc deleted file mode 100644 index 5ef368ade..000000000 --- a/docs/doc_examples/8653e76676de5d327201b77512afa3a0.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putSettings({ - index: 'twitter', - body: { - index: { - number_of_replicas: 2 - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/867f7d43a78066731ead2e223960fc07.asciidoc b/docs/doc_examples/867f7d43a78066731ead2e223960fc07.asciidoc new file mode 100644 index 000000000..331002c0c --- /dev/null +++ b/docs/doc_examples/867f7d43a78066731ead2e223960fc07.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "action.destructive_requires_name": false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc b/docs/doc_examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc new file mode 100644 
index 000000000..8d712c7ba --- /dev/null +++ b/docs/doc_examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.getPolicy({ + name: "my-policy,other-policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/86926bcebf213ac182d4373027554858.asciidoc b/docs/doc_examples/86926bcebf213ac182d4373027554858.asciidoc new file mode 100644 index 000000000..8baadb02c --- /dev/null +++ b/docs/doc_examples/86926bcebf213ac182d4373027554858.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_index", + mappings: { + properties: { + my_counter: { + type: "unsigned_long", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8696ba08ca6cc4992110c331732e5f47.asciidoc b/docs/doc_examples/8696ba08ca6cc4992110c331732e5f47.asciidoc new file mode 100644 index 000000000..44ced54be --- /dev/null +++ b/docs/doc_examples/8696ba08ca6cc4992110c331732e5f47.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + grade_boxplot: { + boxplot: { + field: "grade", + missing: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8699d35269a47ba867fa8cc766287413.asciidoc b/docs/doc_examples/8699d35269a47ba867fa8cc766287413.asciidoc new file mode 100644 index 000000000..d3af85d46 --- /dev/null +++ b/docs/doc_examples/8699d35269a47ba867fa8cc766287413.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.postStartBasic(); +console.log(response); +---- diff --git a/docs/doc_examples/86c5594c4ec551391096c1abcd652b50.asciidoc b/docs/doc_examples/86c5594c4ec551391096c1abcd652b50.asciidoc new file mode 100644 index 000000000..ec1a73d6b --- /dev/null +++ b/docs/doc_examples/86c5594c4ec551391096c1abcd652b50.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_index", + query: { + match_all: {}, + }, + script_fields: { + count10: { + script: { + source: "Long.divideUnsigned(doc['my_counter'].value, 10)", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8703f3b1b3895543abc36e2a7a0013d3.asciidoc b/docs/doc_examples/8703f3b1b3895543abc36e2a7a0013d3.asciidoc new file mode 100644 index 000000000..102d6de69 --- /dev/null +++ b/docs/doc_examples/8703f3b1b3895543abc36e2a7a0013d3.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index_1", +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "index_2", +}); +console.log(response1); + +const response2 = await client.indices.create({ + index: "index_3", + settings: { + "index.priority": 10, + }, +}); 
+console.log(response2); + +const response3 = await client.indices.create({ + index: "index_4", + settings: { + "index.priority": 5, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/871154d08efd7251cf3272e758f06acf.asciidoc b/docs/doc_examples/871154d08efd7251cf3272e758f06acf.asciidoc new file mode 100644 index 000000000..9d1b127fc --- /dev/null +++ b/docs/doc_examples/871154d08efd7251cf3272e758f06acf.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "common_grams_example", + settings: { + analysis: { + analyzer: { + index_grams: { + tokenizer: "whitespace", + filter: ["common_grams"], + }, + }, + filter: { + common_grams: { + type: "common_grams", + common_words: ["a", "is", "the"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8731188553e14134b0a533010318f91a.asciidoc b/docs/doc_examples/8731188553e14134b0a533010318f91a.asciidoc new file mode 100644 index 000000000..476b8e9b5 --- /dev/null +++ b/docs/doc_examples/8731188553e14134b0a533010318f91a.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + terms: { + force: ["British Transport Police"], + }, + }, + aggregations: { + significant_crime_types: { + significant_terms: { + field: "crime_type", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc b/docs/doc_examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc new file mode 100644 index 000000000..88e4bf500 --- /dev/null +++ b/docs/doc_examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.open({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/873e2333734b1cf5ed066596e5f74b0a.asciidoc b/docs/doc_examples/873e2333734b1cf5ed066596e5f74b0a.asciidoc new file mode 100644 index 000000000..966f0118a --- /dev/null +++ b/docs/doc_examples/873e2333734b1cf5ed066596e5f74b0a.asciidoc @@ -0,0 +1,98 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + city: "Amsterdam", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + city: "Amsterdam", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 52.371667)", + city: "Amsterdam", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + city: "Antwerp", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (2.336389 48.861111)", + city: "Paris", + name: "Musée du Louvre", + }, + { + 
index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + city: "Paris", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggs: { + centroid: { + geo_centroid: { + field: "location", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/873fbbc6ab81409058591385fd602736.asciidoc b/docs/doc_examples/873fbbc6ab81409058591385fd602736.asciidoc index 6722b4fb3..73e8ab273 100644 --- a/docs/doc_examples/873fbbc6ab81409058591385fd602736.asciidoc +++ b/docs/doc_examples/873fbbc6ab81409058591385fd602736.asciidoc @@ -3,47 +3,46 @@ [source, js] ---- -const response0 = await client.index({ - index: 'drivers', - id: '1', - body: { +const response = await client.index({ + index: "drivers", + id: 1, + document: { driver: { - last_name: 'McQueen', + last_name: "McQueen", vehicle: [ { - make: 'Powell Motors', - model: 'Canyonero' + make: "Powell Motors", + model: "Canyonero", }, { - make: 'Miller-Meteor', - model: 'Ecto-1' - } - ] - } - } -}) -console.log(response0) + make: "Miller-Meteor", + model: "Ecto-1", + }, + ], + }, + }, +}); +console.log(response); const response1 = await client.index({ - index: 'drivers', - id: '2', - refresh: true, - body: { + index: "drivers", + id: 2, + refresh: "true", + document: { driver: { - last_name: 'Hudson', + last_name: "Hudson", vehicle: [ { - make: 'Mifune', - model: 'Mach Five' + make: "Mifune", + model: "Mach Five", }, { - make: 'Miller-Meteor', - model: 'Ecto-1' - } - ] - } - } -}) -console.log(response1) + make: "Miller-Meteor", + model: "Ecto-1", + }, + ], + }, + }, +}); +console.log(response1); ---- - diff --git a/docs/doc_examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc b/docs/doc_examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc new file mode 100644 index 000000000..7a998c4c7 --- /dev/null +++ b/docs/doc_examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + "user.id": "kimchy", + }, +}); +console.log(response); + +const response1 = await client.count({ + index: "my-index-000001", + q: "user:kimchy", +}); +console.log(response1); + +const response2 = await client.count({ + index: "my-index-000001", + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/8743887d9b89ea1a2d5e780c349972cf.asciidoc b/docs/doc_examples/8743887d9b89ea1a2d5e780c349972cf.asciidoc new file mode 100644 index 000000000..b6f07251e --- /dev/null +++ b/docs/doc_examples/8743887d9b89ea1a2d5e780c349972cf.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "GET /search", + }, + }, + collapse: { + field: "geo.country_name", + inner_hits: { + name: "by_location", + collapse: { + field: "user.id", + }, + size: 3, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/87457bb3467484bec3e9df4e25942ba6.asciidoc b/docs/doc_examples/87457bb3467484bec3e9df4e25942ba6.asciidoc new file mode 100644 index 000000000..238cd5ff2 --- /dev/null +++ 
b/docs/doc_examples/87457bb3467484bec3e9df4e25942ba6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: "FROM mv | EVAL b=MV_MIN(b) | EVAL b + 2, a + b | LIMIT 4", +}); +console.log(response); +---- diff --git a/docs/doc_examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc b/docs/doc_examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc new file mode 100644 index 000000000..ddbbf086a --- /dev/null +++ b/docs/doc_examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.getBehavioralAnalytics({ + name: "my*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/87733deeea4b441b595d19a0f97346f0.asciidoc b/docs/doc_examples/87733deeea4b441b595d19a0f97346f0.asciidoc new file mode 100644 index 000000000..e2c5d71b0 --- /dev/null +++ b/docs/doc_examples/87733deeea4b441b595d19a0f97346f0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.healthReport({ + feature: "shards_availability", +}); +console.log(response); +---- diff --git a/docs/doc_examples/877ea90c663b5df9efe95717646a666f.asciidoc b/docs/doc_examples/877ea90c663b5df9efe95717646a666f.asciidoc new file mode 100644 index 000000000..7579e5be5 --- /dev/null +++ b/docs/doc_examples/877ea90c663b5df9efe95717646a666f.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + group: { + type: "keyword", + }, + user: { + type: "nested", + properties: { + first: { + type: "keyword", + }, + last: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + group: "fans", + user: [ + { + first: "John", + last: "Smith", + }, + { + first: "Alice", + last: "White", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + fields: ["*"], + _source: false, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/87846c3ddacab1da4af626ae8099e4be.asciidoc b/docs/doc_examples/87846c3ddacab1da4af626ae8099e4be.asciidoc new file mode 100644 index 000000000..6b632a678 --- /dev/null +++ b/docs/doc_examples/87846c3ddacab1da4af626ae8099e4be.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "basic_user", + roles: ["user"], + rules: { + field: { + dn: "cn=John Doe,ou=example,o=com", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc b/docs/doc_examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc new file mode 100644 index 000000000..c199db648 --- /dev/null +++ b/docs/doc_examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, 
DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodeattrs({ + v: "true", + h: "name,pid,attr,value", +}); +console.log(response); +---- diff --git a/docs/doc_examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc b/docs/doc_examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc new file mode 100644 index 000000000..5d65f861a --- /dev/null +++ b/docs/doc_examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRoles({ + name: "my_admin_role,my_test_role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/87c42ef733a50954e4d757fc0a08decc.asciidoc b/docs/doc_examples/87c42ef733a50954e4d757fc0a08decc.asciidoc new file mode 100644 index 000000000..a5245804c --- /dev/null +++ b/docs/doc_examples/87c42ef733a50954e4d757fc0a08decc.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key-1", + metadata: { + application: "my-application", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/87d970b4944b6d742c484d7184996c8a.asciidoc b/docs/doc_examples/87d970b4944b6d742c484d7184996c8a.asciidoc new file mode 100644 index 000000000..f258702ac --- /dev/null +++ b/docs/doc_examples/87d970b4944b6d742c484d7184996c8a.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + query_string: "Where is the best place for mountain climbing?", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/87f854393d715aabf4d45e90a8eb74ce.asciidoc b/docs/doc_examples/87f854393d715aabf4d45e90a8eb74ce.asciidoc new file mode 100644 index 000000000..fd8b35e40 --- /dev/null +++ b/docs/doc_examples/87f854393d715aabf4d45e90a8eb74ce.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "reviews", + size: 0, + aggs: { + review_variability: { + median_absolute_deviation: { + field: "rating", + missing: 5, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/88195d87a350e7fff200131f410c3e88.asciidoc b/docs/doc_examples/88195d87a350e7fff200131f410c3e88.asciidoc new file mode 100644 index 000000000..7078a7afc --- /dev/null +++ b/docs/doc_examples/88195d87a350e7fff200131f410c3e88.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + price_ranges: { + range: { + field: "price", + keyed: true, + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 200, + }, + { + from: 200, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/88341b4eba71ec722f3e38fa1696fe87.asciidoc b/docs/doc_examples/88341b4eba71ec722f3e38fa1696fe87.asciidoc new file mode 100644 index 000000000..5933f7fad --- 
/dev/null +++ b/docs/doc_examples/88341b4eba71ec722f3e38fa1696fe87.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + }, + dest: { + index: "sample_ecommerce_orders_by_customer", + }, + pivot: { + group_by: { + user: { + terms: { + field: "user", + }, + }, + customer_id: { + terms: { + field: "customer_id", + }, + }, + }, + aggregations: { + order_count: { + value_count: { + field: "order_id", + }, + }, + total_order_amt: { + sum: { + field: "taxful_total_price", + }, + }, + avg_amt_per_order: { + avg: { + field: "taxful_total_price", + }, + }, + avg_unique_products_per_order: { + avg: { + field: "total_unique_products", + }, + }, + total_unique_products: { + cardinality: { + field: "products.product_id", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/88554b79dba8fd79991855a692b69ff9.asciidoc b/docs/doc_examples/88554b79dba8fd79991855a692b69ff9.asciidoc new file mode 100644 index 000000000..0904a2fe0 --- /dev/null +++ b/docs/doc_examples/88554b79dba8fd79991855a692b69ff9.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.graph.explore({ + index: "clicklogs", + query: { + match: { + "query.raw": "midi", + }, + }, + controls: { + use_significance: false, + sample_size: 2000, + timeout: 2000, + sample_diversity: { + field: "category.raw", + max_docs_per_value: 500, + }, + }, + vertices: [ + { + field: "product", + size: 5, + min_doc_count: 10, + shard_min_doc_count: 3, + }, + ], + connections: { + query: { + bool: { + filter: [ + { + range: { + query_time: { + gte: "2015-10-01 00:00:00", + }, + }, + }, + ], + }, + }, + vertices: [ + { + field: "query.raw", + size: 5, + min_doc_count: 10, + shard_min_doc_count: 3, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8871b8fcb6de4f0c7dff22798fb10fb7.asciidoc b/docs/doc_examples/8871b8fcb6de4f0c7dff22798fb10fb7.asciidoc deleted file mode 100644 index 4ab63e7e5..000000000 --- a/docs/doc_examples/8871b8fcb6de4f0c7dff22798fb10fb7.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - index: 'twitter' - }, - dest: { - index: 'new_twitter', - version_type: 'external' - }, - script: { - source: "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}", - lang: 'painless' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/88a08d0b15ef41324f5c23db533d47d1.asciidoc b/docs/doc_examples/88a08d0b15ef41324f5c23db533d47d1.asciidoc new file mode 100644 index 000000000..667429e4b --- /dev/null +++ b/docs/doc_examples/88a08d0b15ef41324f5c23db533d47d1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/88a283dfccc481f1afba79d9b3c61f51.asciidoc 
b/docs/doc_examples/88a283dfccc481f1afba79d9b3c61f51.asciidoc new file mode 100644 index 000000000..fe244468b --- /dev/null +++ b/docs/doc_examples/88a283dfccc481f1afba79d9b3c61f51.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryUser(); +console.log(response); +---- diff --git a/docs/doc_examples/88b19973b970adf9b73fca82017d4951.asciidoc b/docs/doc_examples/88b19973b970adf9b73fca82017d4951.asciidoc new file mode 100644 index 000000000..6fb12eca1 --- /dev/null +++ b/docs/doc_examples/88b19973b970adf9b73fca82017d4951.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-*", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc b/docs/doc_examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc new file mode 100644 index 000000000..c37e3b663 --- /dev/null +++ b/docs/doc_examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.getStatus({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + keep_alive: "5d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/88cf60d3310a56d8ae12704abc05b565.asciidoc b/docs/doc_examples/88cf60d3310a56d8ae12704abc05b565.asciidoc new file mode 100644 index 000000000..0c4c71725 --- /dev/null +++ b/docs/doc_examples/88cf60d3310a56d8ae12704abc05b565.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.getTrialStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc b/docs/doc_examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc new file mode 100644 index 000000000..5a195c6f2 --- /dev/null +++ b/docs/doc_examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "mistral-embeddings", + pipeline: "mistral_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8963fb1e3d0900ba3b68be212e8972ee.asciidoc b/docs/doc_examples/8963fb1e3d0900ba3b68be212e8972ee.asciidoc new file mode 100644 index 000000000..800021f23 --- /dev/null +++ b/docs/doc_examples/8963fb1e3d0900ba3b68be212e8972ee.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + names: { + type: "text", + position_increment_gap: 0, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + names: ["John Abraham", 
"Lincoln Smith"], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match_phrase: { + names: "Abraham Lincoln", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/897668edcbb0785fa5229aeb2dfc963e.asciidoc b/docs/doc_examples/897668edcbb0785fa5229aeb2dfc963e.asciidoc new file mode 100644 index 000000000..3a2727d45 --- /dev/null +++ b/docs/doc_examples/897668edcbb0785fa5229aeb2dfc963e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + query: { + match: { + message: "bonsai tree", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/899eef71a67a1b2aa11a2166ec7f48f1.asciidoc b/docs/doc_examples/899eef71a67a1b2aa11a2166ec7f48f1.asciidoc deleted file mode 100644 index 5c91f566b..000000000 --- a/docs/doc_examples/899eef71a67a1b2aa11a2166ec7f48f1.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - price: { - unmapped_type: 'long' - } - } - ], - query: { - term: { - product: 'chocolate' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/89a6b24618cafd60de1702a5b9f28a8d.asciidoc b/docs/doc_examples/89a6b24618cafd60de1702a5b9f28a8d.asciidoc new file mode 100644 index 000000000..13d14c876 --- /dev/null +++ b/docs/doc_examples/89a6b24618cafd60de1702a5b9f28a8d.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + suggest: { + text: "noble prize", + simple_phrase: { + phrase: { + field: "title.trigram", + size: 1, + direct_generator: [ + { + field: "title.trigram", + suggest_mode: "always", + min_word_length: 1, + }, + ], + collate: { + query: { + source: { + match: { + "{{field_name}}": "{{suggestion}}", + }, + }, + }, + params: { + field_name: "title", + }, + prune: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89a8ac1509936acc272fc2d72907bc45.asciidoc b/docs/doc_examples/89a8ac1509936acc272fc2d72907bc45.asciidoc deleted file mode 100644 index f7af4f7b0..000000000 --- a/docs/doc_examples/89a8ac1509936acc272fc2d72907bc45.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.getSource({ - index: 'twitter', - id: '1' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/89aed93f641a5e243bdc3ee5cdc2acc6.asciidoc b/docs/doc_examples/89aed93f641a5e243bdc3ee5cdc2acc6.asciidoc new file mode 100644 index 000000000..88aba54b9 --- /dev/null +++ b/docs/doc_examples/89aed93f641a5e243bdc3ee5cdc2acc6.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1", "index2"], + template: { + script: { + lang: "mustache", + 
source: + '\n {\n "query": {\n "bool": {\n "should": [\n {{#text}}\n {\n "multi_match": {\n "query": "{{query_string}}",\n "fields": [{{#text_fields}}"{{name}}^{{boost}}",{{/text_fields}}],\n "boost": "{{text_query_boost}}"\n }\n },\n {{/text}}\n {{#elser}}\n {{#elser_fields}}\n {\n "sparse_vector": {\n "field": "ml.inference.{{.}}_expanded.predicted_value",\n "inference_id": "",\n "query": "{{query_string}}"\n }\n },\n {{/elser_fields}}\n { "bool": { "must": [] } },\n {{/elser}}\n {{^text}}\n {{^elser}}\n {\n "query_string": {\n "query": "{{query_string}}",\n "default_field": "{{default_field}}",\n "default_operator": "{{default_operator}}",\n "boost": "{{text_query_boost}}"\n }\n },\n {{/elser}}\n {{/text}}\n { "bool": { "must": [] } }\n ],\n "minimum_should_match": 1\n }\n },\n "min_score": "{{min_score}}",\n "explain": "{{explain}}",\n "from": "{{from}}",\n "size": "{{size}}"\n }\n ', + params: { + text: false, + elser: false, + elser_fields: [ + { + name: "title", + boost: 1, + }, + { + name: "description", + boost: 1, + }, + ], + text_fields: [ + { + name: "title", + boost: 10, + }, + { + name: "description", + boost: 5, + }, + { + name: "state", + boost: 1, + }, + ], + query_string: "*", + text_query_boost: 4, + default_field: "*", + default_operator: "OR", + explain: false, + from: 0, + size: 10, + min_score: 0, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc b/docs/doc_examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc new file mode 100644 index 000000000..d0d409240 --- /dev/null +++ b/docs/doc_examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_repository", + repository: { + type: "fs", + settings: { + location: "my_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89c57917bc7bd2e6387b5eb54ece37b1.asciidoc b/docs/doc_examples/89c57917bc7bd2e6387b5eb54ece37b1.asciidoc new file mode 100644 index 000000000..fe5711134 --- /dev/null +++ b/docs/doc_examples/89c57917bc7bd2e6387b5eb54ece37b1.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.count({ + index: "my-index-000001", + query: { + exists: { + field: "my-field", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc b/docs/doc_examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc new file mode 100644 index 000000000..700b24426 --- /dev/null +++ b/docs/doc_examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: "my_source_index", + block: "write", +}); +console.log(response); +---- diff --git a/docs/doc_examples/89dee10a24ea2727af5b00039a4271bd.asciidoc b/docs/doc_examples/89dee10a24ea2727af5b00039a4271bd.asciidoc new file mode 100644 index 000000000..4b84700b5 --- /dev/null +++ b/docs/doc_examples/89dee10a24ea2727af5b00039a4271bd.asciidoc @@ -0,0 +1,161 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate 
the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "tour", + mappings: { + properties: { + city: { + type: "keyword", + time_series_dimension: true, + }, + category: { + type: "keyword", + }, + route: { + type: "long", + }, + name: { + type: "keyword", + }, + location: { + type: "geo_point", + }, + "@timestamp": { + type: "date", + }, + }, + }, + settings: { + index: { + mode: "time_series", + routing_path: ["city"], + time_series: { + start_time: "2023-01-01T00:00:00Z", + end_time: "2024-01-01T00:00:00Z", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "tour", + refresh: "true", + operations: [ + { + index: {}, + }, + { + "@timestamp": "2023-01-02T09:00:00Z", + route: 0, + location: "POINT(4.889187 52.373184)", + city: "Amsterdam", + category: "Attraction", + name: "Royal Palace Amsterdam", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-02T10:00:00Z", + route: 1, + location: "POINT(4.885057 52.370159)", + city: "Amsterdam", + category: "Attraction", + name: "The Amsterdam Dungeon", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-02T13:00:00Z", + route: 2, + location: "POINT(4.901618 52.369219)", + city: "Amsterdam", + category: "Museum", + name: "Museum Het Rembrandthuis", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-02T16:00:00Z", + route: 3, + location: "POINT(4.912350 52.374081)", + city: "Amsterdam", + category: "Museum", + name: "NEMO Science Museum", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-03T12:00:00Z", + route: 4, + location: "POINT(4.914722 52.371667)", + city: "Amsterdam", + category: "Museum", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-04T09:00:00Z", + route: 5, + location: "POINT(4.401384 51.220292)", + city: "Antwerp", + category: "Attraction", + name: "Cathedral of Our Lady", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-04T12:00:00Z", + route: 6, + location: "POINT(4.405819 51.221758)", + city: "Antwerp", + category: "Museum", + name: "Snijders&Rockoxhuis", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-04T15:00:00Z", + route: 7, + location: "POINT(4.405200 51.222900)", + city: "Antwerp", + category: "Museum", + name: "Letterenhuis", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-05T10:00:00Z", + route: 8, + location: "POINT(2.336389 48.861111)", + city: "Paris", + category: "Museum", + name: "Musée du Louvre", + }, + { + index: {}, + }, + { + "@timestamp": "2023-01-05T14:00:00Z", + route: 9, + location: "POINT(2.327000 48.860000)", + city: "Paris", + category: "Museum", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc b/docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc new file mode 100644 index 000000000..571f64436 --- /dev/null +++ b/docs/doc_examples/89f547649895176c246bb8c41313ff21.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\nFROM library\n| EVAL year = DATE_EXTRACT("year", release_date)\n| WHERE page_count > ? 
AND match(author, ?, {"minimum_should_match": ?})\n| LIMIT 5\n', + params: [300, "Frank Herbert", 2], +}); +console.log(response); +---- diff --git a/docs/doc_examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc b/docs/doc_examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc new file mode 100644 index 000000000..03db68d26 --- /dev/null +++ b/docs/doc_examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["snowball"], + text: "detailed output", + explain: true, + attributes: ["keyword"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a0b5f759de3f27f0801c1176e616117.asciidoc b/docs/doc_examples/8a0b5f759de3f27f0801c1176e616117.asciidoc new file mode 100644 index 000000000..5ac85568d --- /dev/null +++ b/docs/doc_examples/8a0b5f759de3f27f0801c1176e616117.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "semantic-embeddings", + mappings: { + properties: { + content: { + type: "semantic_text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a12cd824404d74f098d854716a26899.asciidoc b/docs/doc_examples/8a12cd824404d74f098d854716a26899.asciidoc new file mode 100644 index 000000000..0de5dd4f6 --- /dev/null +++ b/docs/doc_examples/8a12cd824404d74f098d854716a26899.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteDatafeed({ + datafeed_id: "datafeed-total-requests", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc b/docs/doc_examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc new file mode 100644 index 000000000..d88b6d247 --- /dev/null +++ b/docs/doc_examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.get({ + task_id: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc b/docs/doc_examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc new file mode 100644 index 000000000..75db49a3d --- /dev/null +++ b/docs/doc_examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: { + source: + "if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }", + lang: "painless", + params: { + tag: "blue", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a4941cae0b32d68b22bec2d12c82860.asciidoc b/docs/doc_examples/8a4941cae0b32d68b22bec2d12c82860.asciidoc new file mode 100644 index 000000000..120f4e523 --- /dev/null +++ b/docs/doc_examples/8a4941cae0b32d68b22bec2d12c82860.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence by process.pid with maxspan=1h\n [ process where process.name == "regsvr32.exe" ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/8a617dbfe5887f8ecc8815de132b6eb0.asciidoc b/docs/doc_examples/8a617dbfe5887f8ecc8815de132b6eb0.asciidoc new file mode 100644 index 000000000..b481e842e --- /dev/null +++ b/docs/doc_examples/8a617dbfe5887f8ecc8815de132b6eb0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "cross-cluster-kibana", + password: "l0ng-r4nd0m-p@ssw0rd", + roles: ["logstash-reader", "kibana-access"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc b/docs/doc_examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc new file mode 100644 index 000000000..c6532bbde --- /dev/null +++ b/docs/doc_examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.explainDataFrameAnalytics({ + source: { + index: "houses_sold_last_10_yrs", + }, + analysis: { + regression: { + dependent_variable: "price", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8aa74aee3dcf4b34028e4c5e1c1ed27b.asciidoc b/docs/doc_examples/8aa74aee3dcf4b34028e4c5e1c1ed27b.asciidoc new file mode 100644 index 000000000..2146d1c0b --- /dev/null +++ b/docs/doc_examples/8aa74aee3dcf4b34028e4c5e1c1ed27b.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bug_reports", + mappings: { + properties: { + title: { + type: "text", + }, + labels: { + type: "flattened", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "bug_reports", + id: 1, + document: { + title: "Results are not sorted correctly.", + labels: { + priority: "urgent", + release: ["v1.2.5", "v1.3.0"], + timestamp: { + created: 1541458026, + closed: 1541457010, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/8ab11a25e017124a70484781ca11fb52.asciidoc b/docs/doc_examples/8ab11a25e017124a70484781ca11fb52.asciidoc new file mode 100644 index 000000000..abbd63ece --- /dev/null +++ b/docs/doc_examples/8ab11a25e017124a70484781ca11fb52.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + filter_path: "-hits.events", + query: '\n any where process.name == "regsvr32.exe" \n ', + size: 200, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b07372a21a10a16b52e70fc0c87ad4e.asciidoc b/docs/doc_examples/8b07372a21a10a16b52e70fc0c87ad4e.asciidoc new file mode 100644 index 000000000..f93c9a6ad --- /dev/null +++ b/docs/doc_examples/8b07372a21a10a16b52e70fc0c87ad4e.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` 
to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + region: "US", + manager: { + age: 30, + name: { + first: "John", + last: "Smith", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b301122cbf42be6eafeda714a36559e.asciidoc b/docs/doc_examples/8b301122cbf42be6eafeda714a36559e.asciidoc new file mode 100644 index 000000000..fe63d2690 --- /dev/null +++ b/docs/doc_examples/8b301122cbf42be6eafeda714a36559e.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.logstash.putPipeline({ + id: "my_pipeline", + pipeline: { + description: "Sample pipeline for illustration purposes", + last_modified: "2021-01-02T02:50:51.250Z", + pipeline_metadata: { + type: "logstash_pipeline", + version: "1", + }, + username: "elastic", + pipeline: "input {}\n filter { grok {} }\n output {}", + pipeline_settings: { + "pipeline.workers": 1, + "pipeline.batch.size": 125, + "pipeline.batch.delay": 50, + "queue.type": "memory", + "queue.max_bytes": "1gb", + "queue.checkpoint.writes": 1024, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b38eeb41eb388ee6d92f26b5c0cc48d.asciidoc b/docs/doc_examples/8b38eeb41eb388ee6d92f26b5c0cc48d.asciidoc new file mode 100644 index 000000000..9d2e6845a --- /dev/null +++ b/docs/doc_examples/8b38eeb41eb388ee6d92f26b5c0cc48d.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-prod-tag-script", + script: { + lang: "painless", + source: + "\n Collection tags = ctx.tags;\n if(tags != null){\n for (String tag : tags) {\n if (tag.toLowerCase().contains('prod')) {\n return false;\n }\n }\n }\n return true;\n ", + }, +}); +console.log(response); + +const response1 = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + drop: { + description: "Drop documents that don't contain 'prod' tag", + if: { + id: "my-prod-tag-script", + }, + }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc b/docs/doc_examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc new file mode 100644 index 000000000..6a76e6bcd --- /dev/null +++ b/docs/doc_examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getRoleMapping({ + name: "mapping1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b5bc6e217b0d33e4c88d84f5c1a0712.asciidoc b/docs/doc_examples/8b5bc6e217b0d33e4c88d84f5c1a0712.asciidoc new file mode 100644 index 000000000..7f67c6988 --- /dev/null +++ b/docs/doc_examples/8b5bc6e217b0d33e4c88d84f5c1a0712.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + products_without_a_price: { + missing: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b652e3205a5e9e0187f56ce3c36ae4e.asciidoc 
b/docs/doc_examples/8b652e3205a5e9e0187f56ce3c36ae4e.asciidoc new file mode 100644 index 000000000..fad8ef639 --- /dev/null +++ b/docs/doc_examples/8b652e3205a5e9e0187f56ce3c36ae4e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "log-messages", + filter_path: "aggregations", + aggs: { + categories: { + categorize_text: { + field: "message", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b7956a2b88fd798a895d3466d671b58.asciidoc b/docs/doc_examples/8b7956a2b88fd798a895d3466d671b58.asciidoc new file mode 100644 index 000000000..6216793f1 --- /dev/null +++ b/docs/doc_examples/8b7956a2b88fd798a895d3466d671b58.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "http.tracer.include": "*", + "http.tracer.exclude": "", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc b/docs/doc_examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc new file mode 100644 index 000000000..9bee24620 --- /dev/null +++ b/docs/doc_examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx_keep", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + path: { + type: "object", + synthetic_source_keep: "all", + }, + ids: { + type: "integer", + synthetic_source_keep: "arrays", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8baccd8688a6bad1749b8935f9601ea4.asciidoc b/docs/doc_examples/8baccd8688a6bad1749b8935f9601ea4.asciidoc deleted file mode 100644 index f83ba6405..000000000 --- a/docs/doc_examples/8baccd8688a6bad1749b8935f9601ea4.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'my_index', - id: '1', - body: { - group: 'fans', - user: [ - { - first: 'John', - last: 'Smith' - }, - { - first: 'Alice', - last: 'White' - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc b/docs/doc_examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc new file mode 100644 index 000000000..c92fe8266 --- /dev/null +++ b/docs/doc_examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/_sync_job/my-connector-sync-job/_error", + body: { + error: "some-error", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c2060b0272556457f4871c5d7a589fd.asciidoc b/docs/doc_examples/8c2060b0272556457f4871c5d7a589fd.asciidoc new file mode 100644 index 000000000..b2e2c8b0f --- /dev/null +++ b/docs/doc_examples/8c2060b0272556457f4871c5d7a589fd.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, 
DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "logstash-reader", + indices: [ + { + names: ["logstash-*"], + privileges: ["read", "view_index_metadata"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc b/docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc new file mode 100644 index 000000000..680f24481 --- /dev/null +++ b/docs/doc_examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias({ + index: ".ml-anomalies-custom-example", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c5977410335d58217e0626618ce6641.asciidoc b/docs/doc_examples/8c5977410335d58217e0626618ce6641.asciidoc deleted file mode 100644 index 83e2c51bd..000000000 --- a/docs/doc_examples/8c5977410335d58217e0626618ce6641.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'my_index', - id: '2', - body: { - color: 'blue' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc b/docs/doc_examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc new file mode 100644 index 000000000..226992d54 --- /dev/null +++ b/docs/doc_examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteFilter({ + filter_id: "safe_domains", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c619666488927dac6ecb7dcebca44c2.asciidoc b/docs/doc_examples/8c619666488927dac6ecb7dcebca44c2.asciidoc new file mode 100644 index 000000000..d637e9ff4 --- /dev/null +++ b/docs/doc_examples/8c619666488927dac6ecb7dcebca44c2.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cohere-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1024, + element_type: "byte", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc b/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc new file mode 100644 index 000000000..aa09492cf --- /dev/null +++ b/docs/doc_examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + categories: { + terms: { + field: "category.keyword", + size: 5, + order: { + total_revenue: "desc", + }, + }, + aggs: { + total_revenue: { + sum: { + field: "taxful_total_price", + }, + }, + avg_order_value: { + avg: { + field: "taxful_total_price", + }, + }, + total_items: { + sum: { + field: "total_quantity", + }, + }, + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/8c693e057f6e85fbf2b56ca442719362.asciidoc b/docs/doc_examples/8c693e057f6e85fbf2b56ca442719362.asciidoc new file mode 100644 index 000000000..da0b3217f --- /dev/null +++ b/docs/doc_examples/8c693e057f6e85fbf2b56ca442719362.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stats-index", + size: 0, + aggs: { + metric_min: { + min: { + field: "agg_metric", + }, + }, + metric_max: { + max: { + field: "agg_metric", + }, + }, + metric_value_count: { + value_count: { + field: "agg_metric", + }, + }, + metric_sum: { + sum: { + field: "agg_metric", + }, + }, + metric_avg: { + avg: { + field: "agg_metric", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc b/docs/doc_examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc new file mode 100644 index 000000000..756573630 --- /dev/null +++ b/docs/doc_examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "model-flight-delays-pre", + source: { + index: ["kibana_sample_data_flights"], + query: { + range: { + DistanceKilometers: { + gt: 0, + }, + }, + }, + _source: { + includes: [], + excludes: ["FlightDelay", "FlightDelayType"], + }, + }, + dest: { + index: "df-flight-delays", + results_field: "ml-results", + }, + analysis: { + regression: { + dependent_variable: "FlightDelayMin", + training_percent: 90, + }, + }, + analyzed_fields: { + includes: [], + excludes: ["FlightNum"], + }, + model_memory_limit: "100mb", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c8b5224befab7804461c7e7b6086d9a.asciidoc b/docs/doc_examples/8c8b5224befab7804461c7e7b6086d9a.asciidoc new file mode 100644 index 000000000..05fa659e7 --- /dev/null +++ b/docs/doc_examples/8c8b5224befab7804461c7e7b6086d9a.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Document with ID 1", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + text: "Document with ID 2", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + terms: { + _id: ["1", "2"], + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/8c9081dc738d1290fd76071b283fcaec.asciidoc b/docs/doc_examples/8c9081dc738d1290fd76071b283fcaec.asciidoc new file mode 100644 index 000000000..19241e817 --- /dev/null +++ b/docs/doc_examples/8c9081dc738d1290fd76071b283fcaec.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 2, + routing: "user1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc b/docs/doc_examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc new file mode 100644 
index 000000000..bbd7c9edc --- /dev/null +++ b/docs/doc_examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + fields: ["voltage_corrected", "node"], + size: 2, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8cbf9b46ce3ccc966c4902d2e0c56317.asciidoc b/docs/doc_examples/8cbf9b46ce3ccc966c4902d2e0c56317.asciidoc new file mode 100644 index 000000000..117a672d0 --- /dev/null +++ b/docs/doc_examples/8cbf9b46ce3ccc966c4902d2e0c56317.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["keyword_repeat", "stemmer"], + text: "fox running and jumping", + explain: true, + attributes: "keyword", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc b/docs/doc_examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc index bc1eb2c9b..0b2932f62 100644 --- a/docs/doc_examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc +++ b/docs/doc_examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc @@ -4,77 +4,76 @@ [source, js] ---- const response = await client.bulk({ - body: [ + operations: [ { update: { - _id: '1', - _index: 'index1', - retry_on_conflict: 3 - } + _id: "1", + _index: "index1", + retry_on_conflict: 3, + }, }, { doc: { - field: 'value' - } + field: "value", + }, }, { update: { - _id: '0', - _index: 'index1', - retry_on_conflict: 3 - } + _id: "0", + _index: "index1", + retry_on_conflict: 3, + }, }, { script: { - source: 'ctx._source.counter += params.param1', - lang: 'painless', + source: "ctx._source.counter += params.param1", + lang: "painless", params: { - param1: 1 - } + param1: 1, + }, }, upsert: { - counter: 1 - } + counter: 1, + }, }, { update: { - _id: '2', - _index: 'index1', - retry_on_conflict: 3 - } + _id: "2", + _index: "index1", + retry_on_conflict: 3, + }, }, { doc: { - field: 'value' + field: "value", }, - doc_as_upsert: true + doc_as_upsert: true, }, { update: { - _id: '3', - _index: 'index1', - _source: true - } + _id: "3", + _index: "index1", + _source: true, + }, }, { doc: { - field: 'value' - } + field: "value", + }, }, { update: { - _id: '4', - _index: 'index1' - } + _id: "4", + _index: "index1", + }, }, { doc: { - field: 'value' + field: "value", }, - _source: true - } - ] -}) -console.log(response) + _source: true, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/8cef2b98f3fe3a85874f1b48ebe6ec63.asciidoc b/docs/doc_examples/8cef2b98f3fe3a85874f1b48ebe6ec63.asciidoc new file mode 100644 index 000000000..a2825d9b1 --- /dev/null +++ b/docs/doc_examples/8cef2b98f3fe3a85874f1b48ebe6ec63.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "elision_case_insensitive_example", + settings: { + analysis: { + analyzer: { + default: { + tokenizer: "whitespace", + filter: ["elision_case_insensitive"], + }, + }, + filter: { + elision_case_insensitive: { + type: "elision", + articles: ["l", "m", "t", "qu", "n", "s", "j"], + articles_case: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff 
--git a/docs/doc_examples/8d05862be1f9e7edaba162b1888b5677.asciidoc b/docs/doc_examples/8d05862be1f9e7edaba162b1888b5677.asciidoc new file mode 100644 index 000000000..0241e351c --- /dev/null +++ b/docs/doc_examples/8d05862be1f9e7edaba162b1888b5677.asciidoc @@ -0,0 +1,61 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "cooking_blog", + properties: { + title: { + type: "text", + analyzer: "standard", + fields: { + keyword: { + type: "keyword", + ignore_above: 256, + }, + }, + }, + description: { + type: "text", + fields: { + keyword: { + type: "keyword", + }, + }, + }, + author: { + type: "text", + fields: { + keyword: { + type: "keyword", + }, + }, + }, + date: { + type: "date", + format: "yyyy-MM-dd", + }, + category: { + type: "text", + fields: { + keyword: { + type: "keyword", + }, + }, + }, + tags: { + type: "text", + fields: { + keyword: { + type: "keyword", + }, + }, + }, + rating: { + type: "float", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8d064eda2199de52e5be9ee68a5b7c68.asciidoc b/docs/doc_examples/8d064eda2199de52e5be9ee68a5b7c68.asciidoc new file mode 100644 index 000000000..929bb2357 --- /dev/null +++ b/docs/doc_examples/8d064eda2199de52e5be9ee68a5b7c68.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-text-embeddings-pipeline", + description: "Text embedding pipeline", + processors: [ + { + inference: { + model_id: ".elser_model_2", + input_output: [ + { + input_field: "my_text_field", + output_field: "my_tokens", + }, + ], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8d421c5bec38eecce4679b219cacc9db.asciidoc b/docs/doc_examples/8d421c5bec38eecce4679b219cacc9db.asciidoc new file mode 100644 index 000000000..ddc9bab51 --- /dev/null +++ b/docs/doc_examples/8d421c5bec38eecce4679b219cacc9db.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + runtime_mappings: { + "load_time.seconds": { + type: "long", + script: { + source: "emit(doc['load_time'].value / params.timeUnit)", + params: { + timeUnit: 1000, + }, + }, + }, + }, + aggs: { + load_time_ranks: { + percentile_ranks: { + values: [500, 600], + field: "load_time.seconds", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8d4ca17349e7e82c329cdd854cc670a1.asciidoc b/docs/doc_examples/8d4ca17349e7e82c329cdd854cc670a1.asciidoc new file mode 100644 index 000000000..709910bb1 --- /dev/null +++ b/docs/doc_examples/8d4ca17349e7e82c329cdd854cc670a1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc b/docs/doc_examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc new file mode 100644 index 000000000..e4289442f --- /dev/null +++ b/docs/doc_examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc @@ -0,0 +1,11 @@ +// This file 
is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlLogout({ + token: "46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3", + refresh_token: "mJdXLtmvTUSpoLwMvdBt_w", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8d6631b622f9bfb8fa70154f6fb8b153.asciidoc b/docs/doc_examples/8d6631b622f9bfb8fa70154f6fb8b153.asciidoc new file mode 100644 index 000000000..4e6209177 --- /dev/null +++ b/docs/doc_examples/8d6631b622f9bfb8fa70154f6fb8b153.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + q: "kimchy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8d7193902a353872740a3324c60c5001.asciidoc b/docs/doc_examples/8d7193902a353872740a3324c60c5001.asciidoc new file mode 100644 index 000000000..fe8e691c7 --- /dev/null +++ b/docs/doc_examples/8d7193902a353872740a3324c60c5001.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "events", + settings: { + index: { + "sort.field": "timestamp", + "sort.order": "desc", + }, + }, + mappings: { + properties: { + timestamp: { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8d9b04f2a97f4229dec9e620126de049.asciidoc b/docs/doc_examples/8d9b04f2a97f4229dec9e620126de049.asciidoc new file mode 100644 index 000000000..22ae3fa2d --- /dev/null +++ b/docs/doc_examples/8d9b04f2a97f4229dec9e620126de049.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.com.amazonaws.request": "DEBUG", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8db799543eb084ec71547980863d60b9.asciidoc b/docs/doc_examples/8db799543eb084ec71547980863d60b9.asciidoc new file mode 100644 index 000000000..fb3df27c8 --- /dev/null +++ b/docs/doc_examples/8db799543eb084ec71547980863d60b9.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + sum_monthly_sales: { + sum_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc b/docs/doc_examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc new file mode 100644 index 000000000..6ff015fdb --- /dev/null +++ b/docs/doc_examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.segments({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e06d8b2b737c43806018eae2ca061c1.asciidoc 
b/docs/doc_examples/8e06d8b2b737c43806018eae2ca061c1.asciidoc new file mode 100644 index 000000000..5c43e7f8f --- /dev/null +++ b/docs/doc_examples/8e06d8b2b737c43806018eae2ca061c1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + message_stats: { + string_stats: { + field: "message.keyword", + missing: "[empty message]", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e0f43829df9af20547ea6896f4c0124.asciidoc b/docs/doc_examples/8e0f43829df9af20547ea6896f4c0124.asciidoc new file mode 100644 index 000000000..738638e26 --- /dev/null +++ b/docs/doc_examples/8e0f43829df9af20547ea6896f4c0124.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "rollover_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_size: "50gb", + }, + }, + }, + delete: { + min_age: "1d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e208098a0156c4c92afe0a06960b230.asciidoc b/docs/doc_examples/8e208098a0156c4c92afe0a06960b230.asciidoc new file mode 100644 index 000000000..cf5a5c2b6 --- /dev/null +++ b/docs/doc_examples/8e208098a0156c4c92afe0a06960b230.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlAuthenticate({ + content: + "PHNhbWxwOlJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6cHJvdG9jb2wiIHhtbG5zOnNhbWw9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMD.....", + ids: ["4fee3b046395c4e751011e97f8900b5273d56685"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e286a205a1f84f888a6d99f2620c80e.asciidoc b/docs/doc_examples/8e286a205a1f84f888a6d99f2620c80e.asciidoc new file mode 100644 index 000000000..fab31728c --- /dev/null +++ b/docs/doc_examples/8e286a205a1f84f888a6d99f2620c80e.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.deprecation": "OFF", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc b/docs/doc_examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc new file mode 100644 index 000000000..9a6f60e85 --- /dev/null +++ b/docs/doc_examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + metric: "indexing,search", + level: "shards", + human: "true", + expand_wildcards: "all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e42a17edace2bc6e42c6a1532779937.asciidoc b/docs/doc_examples/8e42a17edace2bc6e42c6a1532779937.asciidoc new file mode 100644 index 000000000..f65a76375 --- /dev/null +++ b/docs/doc_examples/8e42a17edace2bc6e42c6a1532779937.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + max_price: { + max: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc b/docs/doc_examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc new file mode 100644 index 000000000..f94192bd1 --- /dev/null +++ b/docs/doc_examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.followStats({ + index: "follower_index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e68cdfad45e7e6dff254d931eea29d4.asciidoc b/docs/doc_examples/8e68cdfad45e7e6dff254d931eea29d4.asciidoc new file mode 100644 index 000000000..6f990c6e7 --- /dev/null +++ b/docs/doc_examples/8e68cdfad45e7e6dff254d931eea29d4.asciidoc @@ -0,0 +1,101 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + { + "@timestamp": "2020-06-21T15:00:01-05:00", + message: + '211.11.9.0 - - [2020-06-21T15:00:01-05:00] "GET /english/index.html HTTP/1.0" 304 0', + }, + { + index: {}, + }, + { + "@timestamp": "2020-06-21T15:00:01-05:00", + message: + '211.11.9.0 - - [2020-06-21T15:00:01-05:00] "GET /english/index.html HTTP/1.0" 304 0', + }, + { + index: {}, + }, + { + "@timestamp": "2020-04-30T14:30:17-05:00", + message: + '40.135.0.0 - - [2020-04-30T14:30:17-05:00] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + "@timestamp": "2020-04-30T14:30:53-05:00", + message: + '232.0.0.0 - - [2020-04-30T14:30:53-05:00] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + "@timestamp": "2020-04-30T14:31:12-05:00", + message: + '26.1.0.0 - - [2020-04-30T14:31:12-05:00] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + "@timestamp": "2020-04-30T14:31:19-05:00", + message: + '247.37.0.0 - - [2020-04-30T14:31:19-05:00] "GET /french/splash_inet.html HTTP/1.0" 200 3781', + }, + { + index: {}, + }, + { + "@timestamp": "2020-04-30T14:31:27-05:00", + message: + '252.0.0.0 - - [2020-04-30T14:31:27-05:00] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + "@timestamp": "2020-04-30T14:31:29-05:00", + message: + '247.37.0.0 - - [2020-04-30T14:31:29-05:00] "GET /images/hm_brdl.gif HTTP/1.0" 304 0', + }, + { + index: {}, + }, + { + "@timestamp": "2020-04-30T14:31:29-05:00", + message: + '247.37.0.0 - - [2020-04-30T14:31:29-05:00] "GET /images/hm_arw.gif HTTP/1.0" 304 0', + }, + { + index: {}, + }, + { + "@timestamp": "2020-04-30T14:31:32-05:00", + message: + '247.37.0.0 - - [2020-04-30T14:31:32-05:00] "GET /images/nav_bg_top.gif HTTP/1.0" 200 929', + }, + { + index: {}, + }, + { + "@timestamp": "2020-04-30T14:31:43-05:00", + message: + '247.37.0.0 - - [2020-04-30T14:31:43-05:00] "GET /french/images/nav_venue_off.gif HTTP/1.0" 304 0', + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc b/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc new file mode 100644 index 000000000..fb4577692 --- /dev/null +++ 
b/docs/doc_examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateScheduling({ + connector_id: "my-connector", + scheduling: { + access_control: { + enabled: true, + interval: "0 10 0 * * ?", + }, + full: { + enabled: true, + interval: "0 20 0 * * ?", + }, + incremental: { + enabled: false, + interval: "0 30 0 * * ?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e92b10ebcfedc76562ab52d0e46b916.asciidoc b/docs/doc_examples/8e92b10ebcfedc76562ab52d0e46b916.asciidoc new file mode 100644 index 000000000..7fdd02e82 --- /dev/null +++ b/docs/doc_examples/8e92b10ebcfedc76562ab52d0e46b916.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteScript({ + id: "my-search-template", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc b/docs/doc_examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc new file mode 100644 index 000000000..f05ef2cfe --- /dev/null +++ b/docs/doc_examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.setUpgradeMode({ + enabled: "false", + timeout: "10m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8e9f7261af6264c92d0eb4d586a176f9.asciidoc b/docs/doc_examples/8e9f7261af6264c92d0eb4d586a176f9.asciidoc new file mode 100644 index 000000000..89704ff94 --- /dev/null +++ b/docs/doc_examples/8e9f7261af6264c92d0eb4d586a176f9.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "lowercase_example", + settings: { + analysis: { + analyzer: { + whitespace_lowercase: { + tokenizer: "whitespace", + filter: ["lowercase"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8eac28d2e9b6482b413d61817456a14f.asciidoc b/docs/doc_examples/8eac28d2e9b6482b413d61817456a14f.asciidoc new file mode 100644 index 000000000..ef8fbee12 --- /dev/null +++ b/docs/doc_examples/8eac28d2e9b6482b413d61817456a14f.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + terms: { + field: "genre", + order: { + max_play_count: "desc", + }, + }, + aggs: { + max_play_count: { + max: { + field: "play_count", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8eaf4d5dd4ab1335deefa7749fdbbcc3.asciidoc b/docs/doc_examples/8eaf4d5dd4ab1335deefa7749fdbbcc3.asciidoc deleted file mode 100644 index 54bc01d70..000000000 --- a/docs/doc_examples/8eaf4d5dd4ab1335deefa7749fdbbcc3.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - field_value_factor: { - 
field: 'likes', - factor: 1.2, - modifier: 'sqrt', - missing: 1 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc b/docs/doc_examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc new file mode 100644 index 000000000..e8e466065 --- /dev/null +++ b/docs/doc_examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["content", "name^5"], + query: "this AND that OR thus", + tie_breaker: 0, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8ed31628081db2b6e9106d61d1e142be.asciidoc b/docs/doc_examples/8ed31628081db2b6e9106d61d1e142be.asciidoc new file mode 100644 index 000000000..7940e4418 --- /dev/null +++ b/docs/doc_examples/8ed31628081db2b6e9106d61d1e142be.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + query: "ny city", + auto_generate_synonyms_phrase_query: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8edcd80d9b545a222dcc2f25ca4c6d5f.asciidoc b/docs/doc_examples/8edcd80d9b545a222dcc2f25ca4c6d5f.asciidoc new file mode 100644 index 000000000..59801baf0 --- /dev/null +++ b/docs/doc_examples/8edcd80d9b545a222dcc2f25ca4c6d5f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my-search-app", + params: { + query_string: + "What is the most popular brand of coffee sold in the United States?", + elser_fields: ["title", "meta_description"], + text_fields: ["title", "meta_description"], + rrf: { + rank_window_size: 50, + rank_constant: 25, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8ee9521f57661a050efb614f02b4a090.asciidoc b/docs/doc_examples/8ee9521f57661a050efb614f02b4a090.asciidoc new file mode 100644 index 000000000..e74958326 --- /dev/null +++ b/docs/doc_examples/8ee9521f57661a050efb614f02b4a090.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + terms: { + field: "genre", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8f0511f8a5cb176ff2afdd4311799a33.asciidoc b/docs/doc_examples/8f0511f8a5cb176ff2afdd4311799a33.asciidoc deleted file mode 100644 index 15040fe9b..000000000 --- a/docs/doc_examples/8f0511f8a5cb176ff2afdd4311799a33.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.index({ - index: 'twitter', - id: '1', - refresh: true, - body: { - user: 'kimchy' - } -}) -console.log(response0) - -const response1 = await client.count({ - index: 'twitter', - q: 'user:kimchy' -}) -console.log(response1) - -const response2 = await client.count({ - index: 'twitter', - body: { - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response2) 
----- - diff --git a/docs/doc_examples/8f0c5c81cdb902c136db821947ee70a1.asciidoc b/docs/doc_examples/8f0c5c81cdb902c136db821947ee70a1.asciidoc new file mode 100644 index 000000000..6d592d602 --- /dev/null +++ b/docs/doc_examples/8f0c5c81cdb902c136db821947ee70a1.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "price.adjusted": { + type: "double", + script: + "\n double price = doc['price'].value;\n if (doc['promoted'].value) {\n price *= 0.8;\n }\n emit(price);\n ", + }, + }, + aggs: { + min_price: { + min: { + field: "price.adjusted", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8f2875d976332cf5da8fb7764097a307.asciidoc b/docs/doc_examples/8f2875d976332cf5da8fb7764097a307.asciidoc new file mode 100644 index 000000000..2d5479b4c --- /dev/null +++ b/docs/doc_examples/8f2875d976332cf5da8fb7764097a307.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + priority: 500, + template: { + lifecycle: { + data_retention: "7d", + }, + }, + _meta: { + description: "Template with data stream lifecycle", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc b/docs/doc_examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc new file mode 100644 index 000000000..4e03b85dd --- /dev/null +++ b/docs/doc_examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + actions: "*search", + detailed: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc b/docs/doc_examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc new file mode 100644 index 000000000..8d4db5856 --- /dev/null +++ b/docs/doc_examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "native1_users", + refresh: "true", + roles: ["user"], + rules: { + all: [ + { + field: { + "realm.name": "native1", + }, + }, + { + field: { + username: "principalname1", + }, + }, + ], + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8f7936f219500305e5b2518dbbf949ea.asciidoc b/docs/doc_examples/8f7936f219500305e5b2518dbbf949ea.asciidoc new file mode 100644 index 000000000..51580581f --- /dev/null +++ b/docs/doc_examples/8f7936f219500305e5b2518dbbf949ea.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.status({ + id: "FmpwbThueVB4UkRDeUxqb1l4akIza3cbWEJyeVBPQldTV3FGZGdIeUVabXBldzoyMDIw", +}); +console.log(response); +---- diff --git a/docs/doc_examples/8f9a3fcd17a111f63caa3bef6e5f00f2.asciidoc 
b/docs/doc_examples/8f9a3fcd17a111f63caa3bef6e5f00f2.asciidoc new file mode 100644 index 000000000..939e09227 --- /dev/null +++ b/docs/doc_examples/8f9a3fcd17a111f63caa3bef6e5f00f2.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + terms: { + field: "tags", + execution_hint: "map", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8f9f88cf9a27c1138226efb94ac09e73.asciidoc b/docs/doc_examples/8f9f88cf9a27c1138226efb94ac09e73.asciidoc new file mode 100644 index 000000000..c3470454d --- /dev/null +++ b/docs/doc_examples/8f9f88cf9a27c1138226efb94ac09e73.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + term: { + ip_addr: "192.168.0.0/16", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8fdf2344c4fb3de6902ad7c5735270df.asciidoc b/docs/doc_examples/8fdf2344c4fb3de6902ad7c5735270df.asciidoc deleted file mode 100644 index 6bb55708a..000000000 --- a/docs/doc_examples/8fdf2344c4fb3de6902ad7c5735270df.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'twitter', - id: '0', - _source_includes: '*.id', - _source_excludes: 'entities' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/8fe128323a944765f525c76d85af7a2f.asciidoc b/docs/doc_examples/8fe128323a944765f525c76d85af7a2f.asciidoc new file mode 100644 index 000000000..3d82c170f --- /dev/null +++ b/docs/doc_examples/8fe128323a944765f525c76d85af7a2f.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + track_total_hits: "false", + aggregations: { + sampling: { + random_sampler: { + probability: 0.1, + }, + aggs: { + price_percentiles: { + percentiles: { + field: "taxful_total_price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc b/docs/doc_examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc new file mode 100644 index 000000000..2d7c738ce --- /dev/null +++ b/docs/doc_examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "dsl-data-stream", + document: { + "@timestamp": "2023-10-18T16:21:15.000Z", + message: + '192.0.2.42 - - [06/May/2099:16:21:15 +0000] "GET /images/bg.jpg HTTP/1.0" 200 24736', + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/90083d93e46fad2524755b8d4d1306fc.asciidoc b/docs/doc_examples/90083d93e46fad2524755b8d4d1306fc.asciidoc new file mode 100644 index 000000000..d1c80a37d --- /dev/null +++ b/docs/doc_examples/90083d93e46fad2524755b8d4d1306fc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/_sync_job/my-connector-sync-job/_stats", + body: { + deleted_document_count: 10, + indexed_document_count: 20, + indexed_document_volume: 1000, + total_document_count: 2000, + last_seen: "2023-01-02T10:00:00Z", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/901d66919e584515717bf78ab5ca2cbb.asciidoc b/docs/doc_examples/901d66919e584515717bf78ab5ca2cbb.asciidoc new file mode 100644 index 000000000..696802ab2 --- /dev/null +++ b/docs/doc_examples/901d66919e584515717bf78ab5ca2cbb.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + range: { + date_range: { + field: "date", + time_zone: "CET", + ranges: [ + { + to: "2016/02/01", + }, + { + from: "2016/02/01", + to: "now/d", + }, + { + from: "now/d", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/902cfd5aeec2f65b3adf55f5e38b21f0.asciidoc b/docs/doc_examples/902cfd5aeec2f65b3adf55f5e38b21f0.asciidoc new file mode 100644 index 000000000..66053e31e --- /dev/null +++ b/docs/doc_examples/902cfd5aeec2f65b3adf55f5e38b21f0.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "kibana_sample_data_ecommerce2", + document: { + user: "kimchy", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc b/docs/doc_examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc new file mode 100644 index 000000000..da884497d --- /dev/null +++ b/docs/doc_examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.xpack.info(); +console.log(response); +---- diff --git a/docs/doc_examples/90631797c7fbda43902abf2cc0ea8304.asciidoc b/docs/doc_examples/90631797c7fbda43902abf2cc0ea8304.asciidoc new file mode 100644 index 000000000..5e3c8c70b --- /dev/null +++ b/docs/doc_examples/90631797c7fbda43902abf2cc0ea8304.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "indices", + index_metric: "request_cache", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/908326e14ad76c2ff04a9b6d8365751f.asciidoc b/docs/doc_examples/908326e14ad76c2ff04a9b6d8365751f.asciidoc new file mode 100644 index 000000000..d6ab00301 --- /dev/null +++ b/docs/doc_examples/908326e14ad76c2ff04a9b6d8365751f.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "passage_vectors", + fields: ["creation_time", "full_text"], + _source: false, + knn: { + query_vector: [0.45, 45], + field: "paragraph.vector", + k: 2, + num_candidates: 2, + inner_hits: { + _source: false, + fields: ["paragraph.text"], + size: 1, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/909a032a9c1f7095b798444705b09ad6.asciidoc b/docs/doc_examples/909a032a9c1f7095b798444705b09ad6.asciidoc new file mode 100644 index 000000000..e72223aab --- /dev/null +++ b/docs/doc_examples/909a032a9c1f7095b798444705b09ad6.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "GEOMETRYCOLLECTION (POINT (100.0 0.0), LINESTRING (101.0 0.0, 102.0 1.0))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/90c087560ea6c0b7405f710971c86ef0.asciidoc b/docs/doc_examples/90c087560ea6c0b7405f710971c86ef0.asciidoc new file mode 100644 index 000000000..ed42ce39f --- /dev/null +++ b/docs/doc_examples/90c087560ea6c0b7405f710971c86ef0.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.putAutoFollowPattern({ + name: "my_auto_follow_pattern", + remote_cluster: "remote_cluster", + leader_index_patterns: ["leader_index*"], + follow_index_pattern: "{{leader_index}}-follower", + settings: { + "index.number_of_replicas": 0, + }, + max_read_request_operation_count: 1024, + max_outstanding_read_requests: 16, + max_read_request_size: "1024k", + max_write_request_operation_count: 32768, + max_write_request_size: "16k", + max_outstanding_write_requests: 8, + max_write_buffer_count: 512, + max_write_buffer_size: "512k", + max_retry_delay: "10s", + read_poll_timeout: "30s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc b/docs/doc_examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc new file mode 100644 index 000000000..59859c078 --- /dev/null +++ b/docs/doc_examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.deleteAutoscalingPolicy({ + name: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/90f1f5304922fb6d097846dd1444c075.asciidoc b/docs/doc_examples/90f1f5304922fb6d097846dd1444c075.asciidoc new file mode 100644 index 000000000..9e800a478 --- /dev/null +++ b/docs/doc_examples/90f1f5304922fb6d097846dd1444c075.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "cluster_health_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + http: { + request: { + host: "localhost", + port: 9200, + path: "/_cluster/health", + }, + }, + }, + condition: { + compare: { + "ctx.payload.status": { + eq: "red", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9116ee8a5b00cc877291ed5559563f24.asciidoc b/docs/doc_examples/9116ee8a5b00cc877291ed5559563f24.asciidoc new file mode 100644 index 000000000..9732b73ad --- /dev/null +++ b/docs/doc_examples/9116ee8a5b00cc877291ed5559563f24.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "my_watch", + trigger: { + schedule: { + 
yearly: { + in: "february", + on: 29, + at: "noon", + }, + }, + }, + input: { + simple: { + payload: { + send: "yes", + }, + }, + }, + condition: { + always: {}, + }, + actions: { + test_index: { + throttle_period: "15m", + index: { + index: "test", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/911c56114e50ce7440eb83efc91d28b8.asciidoc b/docs/doc_examples/911c56114e50ce7440eb83efc91d28b8.asciidoc new file mode 100644 index 000000000..352dda611 --- /dev/null +++ b/docs/doc_examples/911c56114e50ce7440eb83efc91d28b8.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-data-stream", + properties: { + host: { + properties: { + ip: { + type: "ip", + ignore_malformed: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9120b6a49ec39a1571339fddf8e1a26f.asciidoc b/docs/doc_examples/9120b6a49ec39a1571339fddf8e1a26f.asciidoc new file mode 100644 index 000000000..00f36997e --- /dev/null +++ b/docs/doc_examples/9120b6a49ec39a1571339fddf8e1a26f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + field: "my-long-field", + value: 10, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/91270cef57ac455547ffd47839420887.asciidoc b/docs/doc_examples/91270cef57ac455547ffd47839420887.asciidoc new file mode 100644 index 000000000..84f7381a4 --- /dev/null +++ b/docs/doc_examples/91270cef57ac455547ffd47839420887.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + filter_path: "aggregations", + size: 0, + aggs: { + buckets: { + composite: { + sources: [ + { + month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + }, + }, + { + type: { + terms: { + field: "type", + }, + }, + }, + ], + }, + aggs: { + avg_price: { + rate: { + field: "price", + unit: "day", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9129dec88d35571b3166c6677297f03b.asciidoc b/docs/doc_examples/9129dec88d35571b3166c6677297f03b.asciidoc new file mode 100644 index 000000000..ac213dce2 --- /dev/null +++ b/docs/doc_examples/9129dec88d35571b3166c6677297f03b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.getTransform({ + transform_id: "ecommerce_transform1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/913770050ebbf3b9b549a899bc11060a.asciidoc b/docs/doc_examples/913770050ebbf3b9b549a899bc11060a.asciidoc deleted file mode 100644 index 464593801..000000000 --- a/docs/doc_examples/913770050ebbf3b9b549a899bc11060a.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'twitter', - body: { - mappings: { - properties: { - counter: { - type: 
'integer', - store: false - }, - tags: { - type: 'keyword', - store: true - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/9138550002cb26ab64918cce427963b8.asciidoc b/docs/doc_examples/9138550002cb26ab64918cce427963b8.asciidoc new file mode 100644 index 000000000..41cdf5cb4 --- /dev/null +++ b/docs/doc_examples/9138550002cb26ab64918cce427963b8.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["foo", "bar"], + priority: 0, + template: { + settings: { + number_of_shards: 1, + }, + }, + version: 123, +}); +console.log(response); +---- diff --git a/docs/doc_examples/913c163c197802078a8af72150178061.asciidoc b/docs/doc_examples/913c163c197802078a8af72150178061.asciidoc new file mode 100644 index 000000000..bfc33ec56 --- /dev/null +++ b/docs/doc_examples/913c163c197802078a8af72150178061.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + sales_deriv: { + derivative: { + buckets_path: "sales", + }, + }, + sales_2nd_deriv: { + derivative: { + buckets_path: "sales_deriv", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9143be4f137574271953a7a8107e175b.asciidoc b/docs/doc_examples/9143be4f137574271953a7a8107e175b.asciidoc new file mode 100644 index 000000000..17b21295f --- /dev/null +++ b/docs/doc_examples/9143be4f137574271953a7a8107e175b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUserProfile({ + uid: "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9166cf38427d5cde5d2ec12a2012b669.asciidoc b/docs/doc_examples/9166cf38427d5cde5d2ec12a2012b669.asciidoc deleted file mode 100644 index b8fdd1d28..000000000 --- a/docs/doc_examples/9166cf38427d5cde5d2ec12a2012b669.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putTemplate({ - name: 'template_1', - body: { - index_patterns: [ - '*' - ], - order: 0, - settings: { - number_of_shards: 1 - }, - version: 123 - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc b/docs/doc_examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc new file mode 100644 index 000000000..39f61a37a --- /dev/null +++ b/docs/doc_examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "restaurants", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + multi_match: { + query: "Austria", + fields: ["city", "region"], + }, + }, + }, + }, + { + knn: { + field: "vector", + 
query_vector: [10, 22, 77], + k: 10, + num_candidates: 10, + }, + }, + ], + rank_constant: 1, + rank_window_size: 50, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/91750571c195718f0ff246e058e4bc63.asciidoc b/docs/doc_examples/91750571c195718f0ff246e058e4bc63.asciidoc new file mode 100644 index 000000000..4e86f61b8 --- /dev/null +++ b/docs/doc_examples/91750571c195718f0ff246e058e4bc63.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "twitter", + query: { + match: { + title: "elasticsearch", + }, + }, + sort: [ + { + date: "asc", + }, + { + tie_breaker_id: "asc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc b/docs/doc_examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc new file mode 100644 index 000000000..17cd435b2 --- /dev/null +++ b/docs/doc_examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + index: "my-index-*", + fields: "rating", + index_filter: { + range: { + "@timestamp": { + gte: "2018", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/91c925fc71abe0ddfe52457e9130363b.asciidoc b/docs/doc_examples/91c925fc71abe0ddfe52457e9130363b.asciidoc new file mode 100644 index 000000000..ad067f7f4 --- /dev/null +++ b/docs/doc_examples/91c925fc71abe0ddfe52457e9130363b.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.grantApiKey({ + grant_type: "password", + username: "test_admin", + password: "x-pack-test-password", + run_as: "test_user", + api_key: { + name: "another-api-key", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/91cbeeda86b4e4e393fc79d4e3a4a781.asciidoc b/docs/doc_examples/91cbeeda86b4e4e393fc79d4e3a4a781.asciidoc new file mode 100644 index 000000000..90c51d019 --- /dev/null +++ b/docs/doc_examples/91cbeeda86b4e4e393fc79d4e3a4a781.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stackoverflow", + size: 0, + query: { + query_string: { + query: "tags:kibana OR tags:javascript", + }, + }, + aggs: { + low_quality_keywords: { + significant_terms: { + field: "tags", + size: 3, + exclude: ["kibana", "javascript"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc b/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc new file mode 100644 index 000000000..8d425841b --- /dev/null +++ b/docs/doc_examples/91e106a2affbc8df32cd940684a779ed.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putIpLocationDatabase({ + id: "my-database-1", + configuration: { + name: "GeoIP2-Domain", + maxmind: { + account_id: "1234567", + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/91ed08faaed54cb5ace9a295af937439.asciidoc b/docs/doc_examples/91ed08faaed54cb5ace9a295af937439.asciidoc new file mode 100644 index 000000000..8e51a524e --- /dev/null +++ b/docs/doc_examples/91ed08faaed54cb5ace9a295af937439.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + runtime_mappings: { + "message.length": { + type: "long", + script: "emit(doc['message.keyword'].value.length())", + }, + }, + aggs: { + message_length: { + histogram: { + interval: 10, + field: "message.length", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9200ed8d5f798a158def4c526e41269e.asciidoc b/docs/doc_examples/9200ed8d5f798a158def4c526e41269e.asciidoc new file mode 100644 index 000000000..71d3f7824 --- /dev/null +++ b/docs/doc_examples/9200ed8d5f798a158def4c526e41269e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + index: "my-index-000001", + fields: "rating", +}); +console.log(response); +---- diff --git a/docs/doc_examples/92035a2a62d01a511662af65606d5fc6.asciidoc b/docs/doc_examples/92035a2a62d01a511662af65606d5fc6.asciidoc new file mode 100644 index 000000000..f5102d8e7 --- /dev/null +++ b/docs/doc_examples/92035a2a62d01a511662af65606d5fc6.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + bucket_truncate: { + bucket_sort: { + from: 1, + size: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9216e8e544e6d193eda1f59e9160a225.asciidoc b/docs/doc_examples/9216e8e544e6d193eda1f59e9160a225.asciidoc new file mode 100644 index 000000000..e28ae08d9 --- /dev/null +++ b/docs/doc_examples/9216e8e544e6d193eda1f59e9160a225.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_near: { + clauses: [ + { + span_term: { + field: "value1", + }, + }, + { + span_term: { + field: "value2", + }, + }, + { + span_term: { + field: "value3", + }, + }, + ], + slop: 12, + in_order: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/922529276f87cb9d116be2468d108466.asciidoc b/docs/doc_examples/922529276f87cb9d116be2468d108466.asciidoc new file mode 100644 index 000000000..1a1b6c4c6 --- /dev/null +++ b/docs/doc_examples/922529276f87cb9d116be2468d108466.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + default: { + type: "simple", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc b/docs/doc_examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc new file mode 100644 
index 000000000..b3d7c123d --- /dev/null +++ b/docs/doc_examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getTrainedModelsStats(); +console.log(response); +---- diff --git a/docs/doc_examples/92284d24bbb80ce6943f2ddcbf74b833.asciidoc b/docs/doc_examples/92284d24bbb80ce6943f2ddcbf74b833.asciidoc new file mode 100644 index 000000000..dd0d0fa0a --- /dev/null +++ b/docs/doc_examples/92284d24bbb80ce6943f2ddcbf74b833.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + flattened_field: { + type: "flattened", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + flattened_field: { + subfield: "value", + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + fields: ["flattened_field.subfield"], + _source: false, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/923aee95078219ee6eb321a252e1121b.asciidoc b/docs/doc_examples/923aee95078219ee6eb321a252e1121b.asciidoc new file mode 100644 index 000000000..93158444f --- /dev/null +++ b/docs/doc_examples/923aee95078219ee6eb321a252e1121b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "ngram_example", + settings: { + analysis: { + analyzer: { + standard_ngram: { + tokenizer: "standard", + filter: ["ngram"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc b/docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc new file mode 100644 index 000000000..8a6e40755 --- /dev/null +++ b/docs/doc_examples/9250ac57ec81d5192e8ad4c462438489.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "jinaai-index", + operations: [ + { + index: { + _index: "jinaai-index", + _id: "1", + }, + }, + { + content: + "Sarah Johnson is a talented marine biologist working at the Oceanographic Institute. Her groundbreaking research on coral reef ecosystems has garnered international attention and numerous accolades.", + }, + { + index: { + _index: "jinaai-index", + _id: "2", + }, + }, + { + content: + "She spends months at a time diving in remote locations, meticulously documenting the intricate relationships between various marine species. 
", + }, + { + index: { + _index: "jinaai-index", + _id: "3", + }, + }, + { + content: + "Her dedication to preserving these delicate underwater environments has inspired a new generation of conservationists.", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc b/docs/doc_examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc new file mode 100644 index 000000000..d680c008d --- /dev/null +++ b/docs/doc_examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + text: "words words", + flag: "foo", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc b/docs/doc_examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc new file mode 100644 index 000000000..aba08bf33 --- /dev/null +++ b/docs/doc_examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.templates({ + name: "my-template-*", + v: "true", + s: "name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc b/docs/doc_examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc new file mode 100644 index 000000000..446a2b57b --- /dev/null +++ b/docs/doc_examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\n FROM library\n | EVAL year = DATE_EXTRACT("year", release_date)\n | WHERE page_count > ? 
AND author == ?\n | STATS count = COUNT(*) by year\n | WHERE count > ?\n | LIMIT 5\n ', + params: [300, "Frank Herbert", 0], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9298aaf8232a819e79b3bf8471245e98.asciidoc b/docs/doc_examples/9298aaf8232a819e79b3bf8471245e98.asciidoc new file mode 100644 index 000000000..df7ed68c4 --- /dev/null +++ b/docs/doc_examples/9298aaf8232a819e79b3bf8471245e98.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getJobStats({ + job_id: "low_request_rate", +}); +console.log(response); +---- diff --git a/docs/doc_examples/92d0c12d53a900308150d572c3f2f82f.asciidoc b/docs/doc_examples/92d0c12d53a900308150d572c3f2f82f.asciidoc new file mode 100644 index 000000000..975c72f37 --- /dev/null +++ b/docs/doc_examples/92d0c12d53a900308150d572c3f2f82f.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + strings_as_keywords: { + match_mapping_type: "string", + mapping: { + type: "keyword", + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/92d343eb755971c44a939d0660bf5ac2.asciidoc b/docs/doc_examples/92d343eb755971c44a939d0660bf5ac2.asciidoc new file mode 100644 index 000000000..3680cbd11 --- /dev/null +++ b/docs/doc_examples/92d343eb755971c44a939d0660bf5ac2.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + test: "test", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 2, + refresh: "true", + document: { + test: "test", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/92f073762634a4b2274f71002494192e.asciidoc b/docs/doc_examples/92f073762634a4b2274f71002494192e.asciidoc new file mode 100644 index 000000000..5f45e9c3b --- /dev/null +++ b/docs/doc_examples/92f073762634a4b2274f71002494192e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + filter_path: "metadata.cluster_coordination.voting_config_exclusions", +}); +console.log(response); +---- diff --git a/docs/doc_examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc b/docs/doc_examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc new file mode 100644 index 000000000..6f5ec0f73 --- /dev/null +++ b/docs/doc_examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + range: { + timestamp: { + gte: "2020-04-30T14:31:27-05:00", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/92fe53019958ba466d1272da0834cf53.asciidoc b/docs/doc_examples/92fe53019958ba466d1272da0834cf53.asciidoc new file mode 100644 index 000000000..0d9bdfb80 --- /dev/null +++ 
b/docs/doc_examples/92fe53019958ba466d1272da0834cf53.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/930a3c5667e3bf47b4e8cc28e7bf8d5f.asciidoc b/docs/doc_examples/930a3c5667e3bf47b4e8cc28e7bf8d5f.asciidoc new file mode 100644 index 000000000..7091b8d7b --- /dev/null +++ b/docs/doc_examples/930a3c5667e3bf47b4e8cc28e7bf8d5f.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "my_admin_role", + refresh: "true", + cluster: ["manage"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["manage"], + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["analyst_user"], + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/930ba37af73dd5ff0342ecfe6c60a4e9.asciidoc b/docs/doc_examples/930ba37af73dd5ff0342ecfe6c60a4e9.asciidoc new file mode 100644 index 000000000..1f6f7cebc --- /dev/null +++ b/docs/doc_examples/930ba37af73dd5ff0342ecfe6c60a4e9.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grades_stats: { + extended_stats: { + field: "grade", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9313f534e1aa266cde7d4af74665497f.asciidoc b/docs/doc_examples/9313f534e1aa266cde7d4af74665497f.asciidoc new file mode 100644 index 000000000..86e737ce5 --- /dev/null +++ b/docs/doc_examples/9313f534e1aa266cde7d4af74665497f.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.put({ + connector_id: "my-{service-name-stub}-connector", + index_name: "my-elasticsearch-index", + name: "Content synced from {service-name}", + service_type: "{service-name-stub}", +}); +console.log(response); +---- diff --git a/docs/doc_examples/931817b168e055ecf738785c721125dd.asciidoc b/docs/doc_examples/931817b168e055ecf738785c721125dd.asciidoc new file mode 100644 index 000000000..3d34dce97 --- /dev/null +++ b/docs/doc_examples/931817b168e055ecf738785c721125dd.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "query_helper_pipeline", + processors: [ + { + script: { + source: + "ctx.prompt = 'Please generate an elasticsearch search query on index `articles_index` for the following natural language query. Dates are in the field `@timestamp`, document types are in the field `type` (options are `news`, `publication`), categories in the field `category` and can be multiple (options are `medicine`, `pharmaceuticals`, `technology`), and document names are in the field `title` which should use a fuzzy match. 
Ignore fields which cannot be determined from the natural language query context: ' + ctx.content", + }, + }, + { + inference: { + model_id: "openai_chat_completions", + input_output: { + input_field: "prompt", + output_field: "query", + }, + }, + }, + { + remove: { + field: "prompt", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/931da02a06953a768f4ad3fecfd7b2df.asciidoc b/docs/doc_examples/931da02a06953a768f4ad3fecfd7b2df.asciidoc new file mode 100644 index 000000000..1401e6fff --- /dev/null +++ b/docs/doc_examples/931da02a06953a768f4ad3fecfd7b2df.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + name: "index.routing.allocation.total_shards_per_node", + flat_settings: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9326e323f7ffde678fa04d2d1de3d3bc.asciidoc b/docs/doc_examples/9326e323f7ffde678fa04d2d1de3d3bc.asciidoc new file mode 100644 index 000000000..8ca628047 --- /dev/null +++ b/docs/doc_examples/9326e323f7ffde678fa04d2d1de3d3bc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "alibabacloud-ai-search-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "alibabacloud_ai_search_embeddings", + model_text: "Calculate fuel cost", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9334ccd09548b585cd637d7c66c5ae65.asciidoc b/docs/doc_examples/9334ccd09548b585cd637d7c66c5ae65.asciidoc new file mode 100644 index 000000000..914fa5b5b --- /dev/null +++ b/docs/doc_examples/9334ccd09548b585cd637d7c66c5ae65.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: { + operator: "or", + query: "the quick brown", + }, + }, + }, + rescore: [ + { + window_size: 100, + query: { + rescore_query: { + match_phrase: { + message: { + query: "the quick brown", + slop: 2, + }, + }, + }, + query_weight: 0.7, + rescore_query_weight: 1.2, + }, + }, + { + window_size: 10, + query: { + score_mode: "multiply", + rescore_query: { + function_score: { + script_score: { + script: { + source: "Math.log10(doc.count.value + 2)", + }, + }, + }, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/93429d2bfbc0a9b7a4854b27e34658cf.asciidoc b/docs/doc_examples/93429d2bfbc0a9b7a4854b27e34658cf.asciidoc new file mode 100644 index 000000000..90418dd1a --- /dev/null +++ b/docs/doc_examples/93429d2bfbc0a9b7a4854b27e34658cf.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + message: { + type: "text", + }, + query: { + type: "percolator", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/93444b445446c1a6033347d6267253d6.asciidoc b/docs/doc_examples/93444b445446c1a6033347d6267253d6.asciidoc 
new file mode 100644 index 000000000..c06ee69df --- /dev/null +++ b/docs/doc_examples/93444b445446c1a6033347d6267253d6.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_phrase_prefix: { + message: { + query: "quick brown f", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc b/docs/doc_examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc new file mode 100644 index 000000000..929aab1a1 --- /dev/null +++ b/docs/doc_examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + settings: { + number_of_shards: 1, + }, + mappings: { + properties: { + field1: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/934ced0998552cc95a28e48554147e8b.asciidoc b/docs/doc_examples/934ced0998552cc95a28e48554147e8b.asciidoc new file mode 100644 index 000000000..7b219da91 --- /dev/null +++ b/docs/doc_examples/934ced0998552cc95a28e48554147e8b.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + index: "my-index", + shard: 0, + primary: false, + current_node: "my-node", +}); +console.log(response); +---- diff --git a/docs/doc_examples/935566d5426d44ade486a49ec5289741.asciidoc b/docs/doc_examples/935566d5426d44ade486a49ec5289741.asciidoc new file mode 100644 index 000000000..197ace01b --- /dev/null +++ b/docs/doc_examples/935566d5426d44ade486a49ec5289741.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 10, + }, + dest: { + index: "semantic-embeddings", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/935ee7c1b86ba9592604834bb673c7a3.asciidoc b/docs/doc_examples/935ee7c1b86ba9592604834bb673c7a3.asciidoc new file mode 100644 index 000000000..c18b0521e --- /dev/null +++ b/docs/doc_examples/935ee7c1b86ba9592604834bb673c7a3.asciidoc @@ -0,0 +1,93 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT 
(2.336389 48.861111)", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggregations: { + "large-grid": { + geotile_grid: { + field: "location", + precision: 8, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/936d809c848f8b77d5b55f57f0aab89a.asciidoc b/docs/doc_examples/936d809c848f8b77d5b55f57f0aab89a.asciidoc new file mode 100644 index 000000000..d05a21ad8 --- /dev/null +++ b/docs/doc_examples/936d809c848f8b77d5b55f57f0aab89a.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + date_detection: false, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + create_date: "2015/09/02", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/937089157fc82cf08b68a954d0e6d52c.asciidoc b/docs/doc_examples/937089157fc82cf08b68a954d0e6d52c.asciidoc new file mode 100644 index 000000000..e8709d3b8 --- /dev/null +++ b/docs/doc_examples/937089157fc82cf08b68a954d0e6d52c.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence with maxspan=1h\n [ process where process.name == "regsvr32.exe" ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/9370e4935ab6678571d3227973b8c830.asciidoc b/docs/doc_examples/9370e4935ab6678571d3227973b8c830.asciidoc new file mode 100644 index 000000000..22384fd94 --- /dev/null +++ b/docs/doc_examples/9370e4935ab6678571d3227973b8c830.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: "_all", + filter_path: "*.aliases", +}); +console.log(response); +---- diff --git a/docs/doc_examples/937ffc65cbb20505a8aba25b37a796a5.asciidoc b/docs/doc_examples/937ffc65cbb20505a8aba25b37a796a5.asciidoc new file mode 100644 index 000000000..a0bfc9691 --- /dev/null +++ b/docs/doc_examples/937ffc65cbb20505a8aba25b37a796a5.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + is_published: { + type: "boolean", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + is_published: "true", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + term: { + is_published: true, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/9382f022086c692ba05efb0acae65946.asciidoc b/docs/doc_examples/9382f022086c692ba05efb0acae65946.asciidoc new file mode 100644 index 000000000..5b5333497 --- /dev/null +++ 
b/docs/doc_examples/9382f022086c692ba05efb0acae65946.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "idx", + id: 1, + document: { + foo: [ + { + bar: 1, + }, + { + bar: 2, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9399cbbd133ec2b7aad2820fa617ae3a.asciidoc b/docs/doc_examples/9399cbbd133ec2b7aad2820fa617ae3a.asciidoc new file mode 100644 index 000000000..c0d1061cf --- /dev/null +++ b/docs/doc_examples/9399cbbd133ec2b7aad2820fa617ae3a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "child_example", + mappings: { + properties: { + join: { + type: "join", + relations: { + question: "answer", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/93bd651aff81daa2b86f9f2089e6d088.asciidoc b/docs/doc_examples/93bd651aff81daa2b86f9f2089e6d088.asciidoc new file mode 100644 index 000000000..ca7e7083d --- /dev/null +++ b/docs/doc_examples/93bd651aff81daa2b86f9f2089e6d088.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + my_id: "1", + text: "This is a question", + my_join_field: { + name: "question", + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + my_id: "2", + text: "This is another question", + my_join_field: { + name: "question", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/93cd0fdd5ca22838db06aa1cabdbe8bd.asciidoc b/docs/doc_examples/93cd0fdd5ca22838db06aa1cabdbe8bd.asciidoc new file mode 100644 index 000000000..bb5e1dd88 --- /dev/null +++ b/docs/doc_examples/93cd0fdd5ca22838db06aa1cabdbe8bd.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "hugging-face-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "hugging_face_embeddings", + model_text: "What's margin of error?", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/93d7ba4130722cae04f9690e52a8f54f.asciidoc b/docs/doc_examples/93d7ba4130722cae04f9690e52a8f54f.asciidoc new file mode 100644 index 000000000..94645c842 --- /dev/null +++ b/docs/doc_examples/93d7ba4130722cae04f9690e52a8f54f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "envelope", + coordinates: [ + [100, 1], + [101, 0], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/93f1bdd72e79827dcf9a34efa02fd977.asciidoc b/docs/doc_examples/93f1bdd72e79827dcf9a34efa02fd977.asciidoc deleted file mode 100644 index f01780a88..000000000 --- 
a/docs/doc_examples/93f1bdd72e79827dcf9a34efa02fd977.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - order: { - _key: 'asc' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/93fb59d3204f37af952198b331fb6bb7.asciidoc b/docs/doc_examples/93fb59d3204f37af952198b331fb6bb7.asciidoc new file mode 100644 index 000000000..71a90d24f --- /dev/null +++ b/docs/doc_examples/93fb59d3204f37af952198b331fb6bb7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.get({ + task_id: "oTUltX4IQMOUUVeiohTt8A:12345", + wait_for_completion: "true", + timeout: "10s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9403764e6eccad7b321b65e9a10c5727.asciidoc b/docs/doc_examples/9403764e6eccad7b321b65e9a10c5727.asciidoc new file mode 100644 index 000000000..aabf664ea --- /dev/null +++ b/docs/doc_examples/9403764e6eccad7b321b65e9a10c5727.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + terms: { + field: "tags", + include: ".*sport.*", + exclude: "water_.*", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc b/docs/doc_examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc new file mode 100644 index 000000000..c96246fe5 --- /dev/null +++ b/docs/doc_examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.segments({ + index: "test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9410af79177dd1df9b7b16229a581e18.asciidoc b/docs/doc_examples/9410af79177dd1df9b7b16229a581e18.asciidoc new file mode 100644 index 000000000..41587cc87 --- /dev/null +++ b/docs/doc_examples/9410af79177dd1df9b7b16229a581e18.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.changePassword({ + username: "jacknich", + password: "new-test-password", +}); +console.log(response); +---- diff --git a/docs/doc_examples/941c8d05486200e835d97642e4ee05d5.asciidoc b/docs/doc_examples/941c8d05486200e835d97642e4ee05d5.asciidoc new file mode 100644 index 000000000..a185efe9b --- /dev/null +++ b/docs/doc_examples/941c8d05486200e835d97642e4ee05d5.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text: { + type: "text", + term_vector: "with_positions_offsets_payloads", + store: true, + analyzer: "fulltext_analyzer", + }, + fullname: { + type: "text", + term_vector: "with_positions_offsets_payloads", + analyzer: "fulltext_analyzer", + }, + }, + }, + settings: { + index: { + 
number_of_shards: 1, + number_of_replicas: 0, + }, + analysis: { + analyzer: { + fulltext_analyzer: { + type: "custom", + tokenizer: "whitespace", + filter: ["lowercase", "type_as_payload"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/94246f45025ed394cd6415ed8d7a0588.asciidoc b/docs/doc_examples/94246f45025ed394cd6415ed8d7a0588.asciidoc new file mode 100644 index 000000000..44902fb96 --- /dev/null +++ b/docs/doc_examples/94246f45025ed394cd6415ed8d7a0588.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.deleteJob({ + id: "sensor", +}); +console.log(response); +---- diff --git a/docs/doc_examples/944806221eb89f5af2298ccdf2902277.asciidoc b/docs/doc_examples/944806221eb89f5af2298ccdf2902277.asciidoc new file mode 100644 index 000000000..3770b06fb --- /dev/null +++ b/docs/doc_examples/944806221eb89f5af2298ccdf2902277.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getRollupCaps({ + id: "_all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/944a2dc22dae2a8503299926326a9c18.asciidoc b/docs/doc_examples/944a2dc22dae2a8503299926326a9c18.asciidoc new file mode 100644 index 000000000..24ff68e5a --- /dev/null +++ b/docs/doc_examples/944a2dc22dae2a8503299926326a9c18.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + ip_addr: { + type: "ip", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + ip_addr: "192.168.1.1", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + term: { + ip_addr: "192.168.0.0/16", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/946522c26d02bebf5c527ba28e55c724.asciidoc b/docs/doc_examples/946522c26d02bebf5c527ba28e55c724.asciidoc new file mode 100644 index 000000000..69e043fad --- /dev/null +++ b/docs/doc_examples/946522c26d02bebf5c527ba28e55c724.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + routing: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9467e52087a13b63b02d78c35ff6f798.asciidoc b/docs/doc_examples/9467e52087a13b63b02d78c35ff6f798.asciidoc new file mode 100644 index 000000000..9c2ef9bd8 --- /dev/null +++ b/docs/doc_examples/9467e52087a13b63b02d78c35ff6f798.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_phrase: { + message: "this is a test", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc b/docs/doc_examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc new file mode 100644 index 000000000..1e8f4bd3e --- /dev/null +++ 
b/docs/doc_examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveCluster(); +console.log(response); +---- diff --git a/docs/doc_examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc b/docs/doc_examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc new file mode 100644 index 000000000..5a4bb2510 --- /dev/null +++ b/docs/doc_examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc @@ -0,0 +1,89 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "retrievers_example_nested", + settings: { + number_of_shards: 1, + }, + mappings: { + properties: { + nested_field: { + type: "nested", + properties: { + paragraph_id: { + type: "keyword", + }, + nested_vector: { + type: "dense_vector", + dims: 3, + similarity: "l2_norm", + index: true, + index_options: { + type: "flat", + }, + }, + }, + }, + topic: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "retrievers_example_nested", + id: 1, + document: { + nested_field: [ + { + paragraph_id: "1a", + nested_vector: [-1.12, -0.59, 0.78], + }, + { + paragraph_id: "1b", + nested_vector: [-0.12, 1.56, 0.42], + }, + { + paragraph_id: "1c", + nested_vector: [1, -1, 0], + }, + ], + topic: ["ai"], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "retrievers_example_nested", + id: 2, + document: { + nested_field: [ + { + paragraph_id: "2a", + nested_vector: [0.23, 1.24, 0.65], + }, + ], + topic: ["information_retrieval"], + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "retrievers_example_nested", + id: 3, + document: { + topic: ["ai"], + }, +}); +console.log(response3); + +const response4 = await client.indices.refresh({ + index: "retrievers_example_nested", +}); +console.log(response4); +---- diff --git a/docs/doc_examples/94cd66bf93f99881c1bda547283a0357.asciidoc b/docs/doc_examples/94cd66bf93f99881c1bda547283a0357.asciidoc new file mode 100644 index 000000000..4007f1b3d --- /dev/null +++ b/docs/doc_examples/94cd66bf93f99881c1bda547283a0357.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "quantized-image-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "image-vector": [0.1, -2], + title: "moose family", + }, + { + index: { + _id: "2", + }, + }, + { + "image-vector": [0.75, -1], + title: "alpine lake", + }, + { + index: { + _id: "3", + }, + }, + { + "image-vector": [1.2, 0.1], + title: "full moon", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc b/docs/doc_examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc new file mode 100644 index 000000000..bc6ad4ec1 --- /dev/null +++ b/docs/doc_examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + term: { + "query.extraction_result": "failed", + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/950f1230536422567f99a205ff4165ec.asciidoc b/docs/doc_examples/950f1230536422567f99a205ff4165ec.asciidoc new file mode 100644 index 000000000..ff8b26755 --- /dev/null +++ b/docs/doc_examples/950f1230536422567f99a205ff4165ec.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-write-alias", + conditions: { + max_age: "7d", + max_docs: 1000, + max_primary_shard_size: "50gb", + max_primary_shard_docs: "2000", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/95414139c7b1203e3c2d99a354415801.asciidoc b/docs/doc_examples/95414139c7b1203e3c2d99a354415801.asciidoc new file mode 100644 index 000000000..dd746d18a --- /dev/null +++ b/docs/doc_examples/95414139c7b1203e3c2d99a354415801.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.recovery({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9559de0c2190f99fcc344887fc7b232a.asciidoc b/docs/doc_examples/9559de0c2190f99fcc344887fc7b232a.asciidoc new file mode 100644 index 000000000..4b9fedd1f --- /dev/null +++ b/docs/doc_examples/9559de0c2190f99fcc344887fc7b232a.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bicycles", + mappings: { + properties: { + cycle_type: { + type: "constant_keyword", + value: "bicycle", + }, + name: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.create({ + index: "other_cycles", + mappings: { + properties: { + cycle_type: { + type: "keyword", + }, + name: { + type: "text", + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/956cb470258024af964cd2dabbaf7c7c.asciidoc b/docs/doc_examples/956cb470258024af964cd2dabbaf7c7c.asciidoc new file mode 100644 index 000000000..f70c4bed1 --- /dev/null +++ b/docs/doc_examples/956cb470258024af964cd2dabbaf7c7c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index", + settings: { + "index.routing.allocation.require.data": null, + "index.routing.allocation.include._tier_preference": "data_warm,data_hot", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc b/docs/doc_examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc new file mode 100644 index 000000000..53a2b96c5 --- /dev/null +++ b/docs/doc_examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["content", "name"], + query: "this AND that", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9584b042223982e0bfde8d12d42c9705.asciidoc b/docs/doc_examples/9584b042223982e0bfde8d12d42c9705.asciidoc new file mode 100644 index 000000000..9583fffb8 
--- /dev/null +++ b/docs/doc_examples/9584b042223982e0bfde8d12d42c9705.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "kerbrolemapping", + roles: ["monitoring_user"], + enabled: true, + rules: { + field: { + username: "user@REALM", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/95b3f53f2065737bbeba6199e8a12df3.asciidoc b/docs/doc_examples/95b3f53f2065737bbeba6199e8a12df3.asciidoc new file mode 100644 index 000000000..de34c8968 --- /dev/null +++ b/docs/doc_examples/95b3f53f2065737bbeba6199e8a12df3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + color: ["blue", "green"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/95c03bdef4faf6bef039c986f4cb3aba.asciidoc b/docs/doc_examples/95c03bdef4faf6bef039c986f4cb3aba.asciidoc new file mode 100644 index 000000000..aab4afc2b --- /dev/null +++ b/docs/doc_examples/95c03bdef4faf6bef039c986f4cb3aba.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ".watcher-history*", + pretty: "true", + query: { + match: { + "result.condition.met": true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/95c1b376652533c352bbf793c74d1b08.asciidoc b/docs/doc_examples/95c1b376652533c352bbf793c74d1b08.asciidoc new file mode 100644 index 000000000..4dcac513f --- /dev/null +++ b/docs/doc_examples/95c1b376652533c352bbf793c74d1b08.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryRole({ + query: { + match: { + description: { + query: "user access", + }, + }, + }, + size: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9606c271921cb800d5ea395b16d6ceaf.asciidoc b/docs/doc_examples/9606c271921cb800d5ea395b16d6ceaf.asciidoc new file mode 100644 index 000000000..226313ea3 --- /dev/null +++ b/docs/doc_examples/9606c271921cb800d5ea395b16d6ceaf.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "galician_example", + settings: { + analysis: { + filter: { + galician_stop: { + type: "stop", + stopwords: "_galician_", + }, + galician_keywords: { + type: "keyword_marker", + keywords: ["exemplo"], + }, + galician_stemmer: { + type: "stemmer", + language: "galician", + }, + }, + analyzer: { + rebuilt_galician: { + tokenizer: "standard", + filter: [ + "lowercase", + "galician_stop", + "galician_keywords", + "galician_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9608820dbeac261ba53fb89bb9400560.asciidoc b/docs/doc_examples/9608820dbeac261ba53fb89bb9400560.asciidoc new file mode 100644 index 000000000..116032d11 --- /dev/null +++ b/docs/doc_examples/9608820dbeac261ba53fb89bb9400560.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO 
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + owner: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/962e6187bbd71c5749376efed04b65ba.asciidoc b/docs/doc_examples/962e6187bbd71c5749376efed04b65ba.asciidoc new file mode 100644 index 000000000..eb3734014 --- /dev/null +++ b/docs/doc_examples/962e6187bbd71c5749376efed04b65ba.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role6", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + except: ["customer.handle"], + grant: ["customer.*"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/966ff3a4c5b61ed1a36d44c17ce06157.asciidoc b/docs/doc_examples/966ff3a4c5b61ed1a36d44c17ce06157.asciidoc new file mode 100644 index 000000000..bcb11e41e --- /dev/null +++ b/docs/doc_examples/966ff3a4c5b61ed1a36d44c17ce06157.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + analysis: { + char_filter: { + quote: { + type: "mapping", + mappings: ['« => "', '» => "'], + }, + }, + normalizer: { + my_normalizer: { + type: "custom", + char_filter: ["quote"], + filter: ["lowercase", "asciifolding"], + }, + }, + }, + }, + mappings: { + properties: { + foo: { + type: "keyword", + normalizer: "my_normalizer", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc b/docs/doc_examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc new file mode 100644 index 000000000..20185078b --- /dev/null +++ b/docs/doc_examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.hasPrivileges({ + cluster: ["monitor", "manage"], + index: [ + { + names: ["suppliers", "products"], + privileges: ["read"], + }, + { + names: ["inventory"], + privileges: ["read", "write"], + }, + ], + application: [ + { + application: "inventory_manager", + privileges: ["read", "data:write/inventory"], + resources: ["product/1852563"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/968fb5b92aa65af09544f7c002b0953e.asciidoc b/docs/doc_examples/968fb5b92aa65af09544f7c002b0953e.asciidoc new file mode 100644 index 000000000..410d4fc38 --- /dev/null +++ b/docs/doc_examples/968fb5b92aa65af09544f7c002b0953e.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "semantic-embeddings", + query: { + semantic: { + field: "content", + query: "How to avoid muscle soreness while running?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/96b9289c3c4c6b135ab3386562c4ee8d.asciidoc b/docs/doc_examples/96b9289c3c4c6b135ab3386562c4ee8d.asciidoc new file mode 100644 index 000000000..1be0187a6 --- /dev/null +++ b/docs/doc_examples/96b9289c3c4c6b135ab3386562c4ee8d.asciidoc @@ -0,0 +1,12 
@@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.max_shards_per_node": 1200, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/96de5703ba0bd43fd4ac239ec5408542.asciidoc b/docs/doc_examples/96de5703ba0bd43fd4ac239ec5408542.asciidoc deleted file mode 100644 index 7de06d9cd..000000000 --- a/docs/doc_examples/96de5703ba0bd43fd4ac239ec5408542.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: { - source: 'ctx._source.counter += params.count', - lang: 'painless', - params: { - count: 4 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/96e137e42d12c180e2c702db30714a9e.asciidoc b/docs/doc_examples/96e137e42d12c180e2c702db30714a9e.asciidoc new file mode 100644 index 000000000..1f0fb8c86 --- /dev/null +++ b/docs/doc_examples/96e137e42d12c180e2c702db30714a9e.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + full_name: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc b/docs/doc_examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc new file mode 100644 index 000000000..d3786611f --- /dev/null +++ b/docs/doc_examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000002", + mappings: { + properties: { + inference_field: { + type: "semantic_text", + inference_id: "my-openai-endpoint", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/96ea0e80323d6d2d99964625c004a44d.asciidoc b/docs/doc_examples/96ea0e80323d6d2d99964625c004a44d.asciidoc new file mode 100644 index 000000000..0ce997004 --- /dev/null +++ b/docs/doc_examples/96ea0e80323d6d2d99964625c004a44d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "dsl-data-stream", + data_retention: "7d", + enabled: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/971c7a36ee79f2b3aa82c64ea338de70.asciidoc b/docs/doc_examples/971c7a36ee79f2b3aa82c64ea338de70.asciidoc new file mode 100644 index 000000000..f3aa57240 --- /dev/null +++ b/docs/doc_examples/971c7a36ee79f2b3aa82c64ea338de70.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + mappings: { + properties: { + foo: { + type: "keyword", + eager_global_ordinals: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/971fd23adb81bb5842c7750e0379336a.asciidoc 
b/docs/doc_examples/971fd23adb81bb5842c7750e0379336a.asciidoc new file mode 100644 index 000000000..e1b27bc79 --- /dev/null +++ b/docs/doc_examples/971fd23adb81bb5842c7750e0379336a.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "movies", + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + match: { + genre: "drama", + }, + }, + }, + }, + field: "plot", + inference_id: "my-msmarco-minilm-model", + inference_text: "films that explore psychological depths", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc b/docs/doc_examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc index 3cd422a08..2620184e7 100644 --- a/docs/doc_examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc +++ b/docs/doc_examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc @@ -4,19 +4,17 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: 'metricbeat-*' - }, - dest: { - index: 'metricbeat' - }, - script: { - lang: 'painless', - source: "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'" - } - } -}) -console.log(response) + source: { + index: "metricbeat-*", + }, + dest: { + index: "metricbeat", + }, + script: { + lang: "painless", + source: + "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/975b4b92464d52068516aa2f0f955cc1.asciidoc b/docs/doc_examples/975b4b92464d52068516aa2f0f955cc1.asciidoc new file mode 100644 index 000000000..0dc310f84 --- /dev/null +++ b/docs/doc_examples/975b4b92464d52068516aa2f0f955cc1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.segments({ + index: "test1,test2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/976e5f9baf81bd6ca0e9f80916a0a4f9.asciidoc b/docs/doc_examples/976e5f9baf81bd6ca0e9f80916a0a4f9.asciidoc new file mode 100644 index 000000000..f47d5d772 --- /dev/null +++ b/docs/doc_examples/976e5f9baf81bd6ca0e9f80916a0a4f9.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role1", + indices: [ + { + names: ["events-*"], + privileges: ["read"], + field_security: { + grant: ["category", "@timestamp", "message"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/97916243f245478b735471a9e37f33d1.asciidoc b/docs/doc_examples/97916243f245478b735471a9e37f33d1.asciidoc new file mode 100644 index 000000000..967dc2118 --- /dev/null +++ b/docs/doc_examples/97916243f245478b735471a9e37f33d1.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ip_addresses", + size: 10, + aggs: { + ip_ranges: { + ip_range: { + field: "ip", + ranges: [ + { + to: "10.0.0.5", + }, + { + from: "10.0.0.5", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/979d25dff2d8987119410291ad47b0d1.asciidoc b/docs/doc_examples/979d25dff2d8987119410291ad47b0d1.asciidoc deleted file mode 100644 index 69b52ce27..000000000 --- a/docs/doc_examples/979d25dff2d8987119410291ad47b0d1.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': { - lat: 40, - lon: -70 - }, - order: 'asc', - unit: 'km' - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc b/docs/doc_examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc new file mode 100644 index 000000000..be8ac4caf --- /dev/null +++ b/docs/doc_examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_internal/desired_balance", +}); +console.log(response); +---- diff --git a/docs/doc_examples/97ae2b62aa372a955278be6f660356ba.asciidoc b/docs/doc_examples/97ae2b62aa372a955278be6f660356ba.asciidoc new file mode 100644 index 000000000..b423d4033 --- /dev/null +++ b/docs/doc_examples/97ae2b62aa372a955278be6f660356ba.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + combined_fields: { + query: "distributed consensus", + fields: ["title^2", "body"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/97babc8d19ef0866774576716eb6d19e.asciidoc b/docs/doc_examples/97babc8d19ef0866774576716eb6d19e.asciidoc new file mode 100644 index 000000000..bc8757667 --- /dev/null +++ b/docs/doc_examples/97babc8d19ef0866774576716eb6d19e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "test", + refresh: "true", + conflicts: "proceed", +}); +console.log(response); + +const response1 = await client.search({ + index: "test", + filter_path: "hits.total", + query: { + match: { + flag: "foo", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc b/docs/doc_examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc new file mode 100644 index 000000000..ae96b5501 --- /dev/null +++ b/docs/doc_examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/97da68c09c9f1a97a21780fd404e213a.asciidoc b/docs/doc_examples/97da68c09c9f1a97a21780fd404e213a.asciidoc new file mode 100644 index 000000000..3ea9ccfc3 --- /dev/null +++ b/docs/doc_examples/97da68c09c9f1a97a21780fd404e213a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "network-traffic", + size: 0, + aggs: { + "ipv4-subnets": { + ip_prefix: { + field: "ipv4", + prefix_length: 24, + append_prefix_length: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc b/docs/doc_examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc new file mode 100644 index 000000000..76e056902 --- /dev/null +++ b/docs/doc_examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.start(); +console.log(response); +---- diff --git a/docs/doc_examples/97f5df84efec655f479fad78bc392d4d.asciidoc b/docs/doc_examples/97f5df84efec655f479fad78bc392d4d.asciidoc new file mode 100644 index 000000000..62eeaf223 --- /dev/null +++ b/docs/doc_examples/97f5df84efec655f479fad78bc392d4d.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + profile: true, + query: { + term: { + "user.id": { + value: "elkbee", + }, + }, + }, + aggs: { + my_scoped_agg: { + terms: { + field: "http.response.status_code", + }, + }, + my_global_agg: { + global: {}, + aggs: { + my_level_agg: { + terms: { + field: "http.response.status_code", + }, + }, + }, + }, + }, + post_filter: { + match: { + message: "search", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/98234499cfec70487cec5d013e976a84.asciidoc b/docs/doc_examples/98234499cfec70487cec5d013e976a84.asciidoc deleted file mode 100644 index 669ec3eb3..000000000 --- a/docs/doc_examples/98234499cfec70487cec5d013e976a84.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.exists({ - index: 'twitter', - id: '0' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/983fbb78e57e8fe98db38cf2d217e943.asciidoc b/docs/doc_examples/983fbb78e57e8fe98db38cf2d217e943.asciidoc new file mode 100644 index 000000000..375c8c058 --- /dev/null +++ b/docs/doc_examples/983fbb78e57e8fe98db38cf2d217e943.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + comments: { + type: "nested", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "test", + id: 1, + refresh: "true", + document: { + title: "Test title", + comments: [ + { + author: "kimchy", + text: "comment text", + }, + { + author: "nik9000", + text: "words words words", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + query: { + 
nested: { + path: "comments", + query: { + match: { + "comments.text": "words", + }, + }, + inner_hits: { + _source: false, + docvalue_fields: ["comments.text.keyword"], + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/9851f5225150bc032fb3b195cd447f4f.asciidoc b/docs/doc_examples/9851f5225150bc032fb3b195cd447f4f.asciidoc new file mode 100644 index 000000000..1adf874ce --- /dev/null +++ b/docs/doc_examples/9851f5225150bc032fb3b195cd447f4f.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "byte-image-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "byte-image-vector": [5, -20], + title: "moose family", + }, + { + index: { + _id: "2", + }, + }, + { + "byte-image-vector": [8, -15], + title: "alpine lake", + }, + { + index: { + _id: "3", + }, + }, + { + "byte-image-vector": [11, 23], + title: "full moon", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/98574a419b6be603a0af8f7f22a92d23.asciidoc b/docs/doc_examples/98574a419b6be603a0af8f7f22a92d23.asciidoc new file mode 100644 index 000000000..bbfc2a28b --- /dev/null +++ b/docs/doc_examples/98574a419b6be603a0af8f7f22a92d23.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.processorGrok(); +console.log(response); +---- diff --git a/docs/doc_examples/98621bea4765b1b838cc9daa914bf5c5.asciidoc b/docs/doc_examples/98621bea4765b1b838cc9daa914bf5c5.asciidoc new file mode 100644 index 000000000..f7c866698 --- /dev/null +++ b/docs/doc_examples/98621bea4765b1b838cc9daa914bf5c5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence with maxspan=1h\n [ process where process.name == "regsvr32.exe" ] by process.pid\n [ file where stringContains(file.name, "scrobj.dll") ] by process.pid\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc b/docs/doc_examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc new file mode 100644 index 000000000..570db554b --- /dev/null +++ b/docs/doc_examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "alibabacloud_ai_search_embeddings", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + api_key: "", + service_id: "ops-text-embedding-001", + host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + workspace: "default", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/98855f4bda8726d5d123aeebf7869e47.asciidoc b/docs/doc_examples/98855f4bda8726d5d123aeebf7869e47.asciidoc new file mode 100644 index 000000000..bc24781f2 --- /dev/null +++ b/docs/doc_examples/98855f4bda8726d5d123aeebf7869e47.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.cat.nodeattrs({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9887f65af249bbf09190b1153ea2597b.asciidoc b/docs/doc_examples/9887f65af249bbf09190b1153ea2597b.asciidoc new file mode 100644 index 000000000..a37868f52 --- /dev/null +++ b/docs/doc_examples/9887f65af249bbf09190b1153ea2597b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsyncStatus({ + id: "FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/98aeb275f829b5f7b8eb2147701565ff.asciidoc b/docs/doc_examples/98aeb275f829b5f7b8eb2147701565ff.asciidoc deleted file mode 100644 index 15a6eef31..000000000 --- a/docs/doc_examples/98aeb275f829b5f7b8eb2147701565ff.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: { - source: "if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'none' }", - lang: 'painless', - params: { - tag: 'green' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/98b121bf47cebd85671a2cb519688d28.asciidoc b/docs/doc_examples/98b121bf47cebd85671a2cb519688d28.asciidoc deleted file mode 100644 index 37593d0bb..000000000 --- a/docs/doc_examples/98b121bf47cebd85671a2cb519688d28.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - JapaneseCars: { - terms: { - field: 'make', - include: [ - 'mazda', - 'honda' - ] - } - }, - ActiveCarManufacturers: { - terms: { - field: 'make', - exclude: [ - 'rover', - 'jensen' - ] - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/98b403c356a9b14544e9b9f646845e9f.asciidoc b/docs/doc_examples/98b403c356a9b14544e9b9f646845e9f.asciidoc new file mode 100644 index 000000000..2054692fc --- /dev/null +++ b/docs/doc_examples/98b403c356a9b14544e9b9f646845e9f.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-search-template", + script: { + lang: "mustache", + source: { + query: { + multi_match: { + query: "{{query_string}}", + fields: + "[{{#text_fields}}{{user_name}}{{^last}},{{/last}}{{/text_fields}}]", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/98c1080d8630d3a18d564312300d020f.asciidoc b/docs/doc_examples/98c1080d8630d3a18d564312300d020f.asciidoc new file mode 100644 index 000000000..962c58754 --- /dev/null +++ b/docs/doc_examples/98c1080d8630d3a18d564312300d020f.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + network_direction: { + internal_networks: ["private"], + }, + }, + ], + }, + docs: [ + { + _source: { + source: { + ip: "128.232.110.120", + }, + destination: { 
+ ip: "192.168.1.1", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/98f14fddddea54a7d6149ab7b92e099d.asciidoc b/docs/doc_examples/98f14fddddea54a7d6149ab7b92e099d.asciidoc deleted file mode 100644 index 97b01621e..000000000 --- a/docs/doc_examples/98f14fddddea54a7d6149ab7b92e099d.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.delete({ - index: 'twitter' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/98f43710cedd28a464e8abf4b09bcc9a.asciidoc b/docs/doc_examples/98f43710cedd28a464e8abf4b09bcc9a.asciidoc new file mode 100644 index 000000000..01c0b0b60 --- /dev/null +++ b/docs/doc_examples/98f43710cedd28a464e8abf4b09bcc9a.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + range: { + "@timestamp": { + gte: "now-1d/d", + lt: "now/d", + }, + }, + }, + aggs: { + "my-agg-name": { + terms: { + field: "my-field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/98f7525ec0bc8945eafa008a5a9c50c0.asciidoc b/docs/doc_examples/98f7525ec0bc8945eafa008a5a9c50c0.asciidoc new file mode 100644 index 000000000..c64fae279 --- /dev/null +++ b/docs/doc_examples/98f7525ec0bc8945eafa008a5a9c50c0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + wait_for_completion_timeout: "2s", + query: '\n process where process.name == "cmd.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/990c0d794ed6f05d1620b5d49f7aff6e.asciidoc b/docs/doc_examples/990c0d794ed6f05d1620b5d49f7aff6e.asciidoc new file mode 100644 index 000000000..ac79f2a9d --- /dev/null +++ b/docs/doc_examples/990c0d794ed6f05d1620b5d49f7aff6e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataLifecycle({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc b/docs/doc_examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc new file mode 100644 index 000000000..cfffd248f --- /dev/null +++ b/docs/doc_examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + first_name: { + type: "text", + }, + last_name: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + first_name: "Barry", + last_name: "White", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + script_fields: { + full_name: { + script: { + lang: "painless", + source: "params._source.first_name + ' ' + params._source.last_name", + }, + }, + }, +}); 
+console.log(response2); +---- diff --git a/docs/doc_examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc b/docs/doc_examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc new file mode 100644 index 000000000..6d805dd85 --- /dev/null +++ b/docs/doc_examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + fields: { + body: {}, + "blog.title": { + number_of_fragments: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99474a7e7979816c874aeac4403be5d0.asciidoc b/docs/doc_examples/99474a7e7979816c874aeac4403be5d0.asciidoc new file mode 100644 index 000000000..eebad0741 --- /dev/null +++ b/docs/doc_examples/99474a7e7979816c874aeac4403be5d0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + by_date: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + avg_price: { + rate: { + field: "price", + unit: "day", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/996521cef7803ef363a49ac6321ea1de.asciidoc b/docs/doc_examples/996521cef7803ef363a49ac6321ea1de.asciidoc new file mode 100644 index 000000000..673a49d35 --- /dev/null +++ b/docs/doc_examples/996521cef7803ef363a49ac6321ea1de.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence with maxspan=1d\n [ process where process.name == "cmd.exe" ]\n ![ process where stringContains(process.command_line, "ocx") ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/996f320a0f537c24b9cd0d71b5f7c1f8.asciidoc b/docs/doc_examples/996f320a0f537c24b9cd0d71b5f7c1f8.asciidoc new file mode 100644 index 000000000..41212a2e2 --- /dev/null +++ b/docs/doc_examples/996f320a0f537c24b9cd0d71b5f7c1f8.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + query: { + match: { + message: "elasticsearch", + }, + }, + script_score: { + script: { + params: { + a: 5, + b: 1.2, + }, + source: "params.a / Math.pow(params.b, doc['my-int'].value)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc b/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc new file mode 100644 index 000000000..620c04c62 --- /dev/null +++ b/docs/doc_examples/99803d7b111b862c0c82e9908e549b16.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "mistral-embeddings-test", + inference_config: { + service: "mistral", + service_settings: { + api_key: "", + model: "mistral-embed", + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/998651b98e152add530084a631a4ab5a.asciidoc b/docs/doc_examples/998651b98e152add530084a631a4ab5a.asciidoc new file mode 100644 index 000000000..9b0378abb --- /dev/null +++ b/docs/doc_examples/998651b98e152add530084a631a4ab5a.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "indices.lifecycle.poll_interval": "1m", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/998c8479c8704bca0e121d5969859517.asciidoc b/docs/doc_examples/998c8479c8704bca0e121d5969859517.asciidoc new file mode 100644 index 000000000..130ceb562 --- /dev/null +++ b/docs/doc_examples/998c8479c8704bca0e121d5969859517.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.count({ + index: "music", +}); +console.log(response); +---- diff --git a/docs/doc_examples/99a52be903945b17e734a1d02a57e958.asciidoc b/docs/doc_examples/99a52be903945b17e734a1d02a57e958.asciidoc deleted file mode 100644 index 0eae9d3c7..000000000 --- a/docs/doc_examples/99a52be903945b17e734a1d02a57e958.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.getFieldMapping({ - index: 'my-index', - fields: 'employee-id' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/99a56f423df3a0e57b7f20146f0d33b5.asciidoc b/docs/doc_examples/99a56f423df3a0e57b7f20146f0d33b5.asciidoc new file mode 100644 index 000000000..24b0adfe1 --- /dev/null +++ b/docs/doc_examples/99a56f423df3a0e57b7f20146f0d33b5.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "logs", + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + message: { + type: "match_only_text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99b617a0a83fcfbe5755ccc724a4ce62.asciidoc b/docs/doc_examples/99b617a0a83fcfbe5755ccc724a4ce62.asciidoc new file mode 100644 index 000000000..74a851858 --- /dev/null +++ b/docs/doc_examples/99b617a0a83fcfbe5755ccc724a4ce62.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "place_path_category", + id: 1, + document: { + suggest: ["timmy's", "starbucks", "dunkin donuts"], + cat: ["cafe", "food"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc b/docs/doc_examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc new file mode 100644 index 000000000..e6e7c97af --- /dev/null +++ b/docs/doc_examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.putAutoFollowPattern({ + name: "", + remote_cluster: "", + leader_index_patterns: [""], + 
leader_index_exclusion_patterns: [""], + follow_index_pattern: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc b/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc new file mode 100644 index 000000000..e5bfa5787 --- /dev/null +++ b/docs/doc_examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deleteIpLocationDatabase({ + id: "example-database-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a02bd47c000a3d9a8911233c37c890f.asciidoc b/docs/doc_examples/9a02bd47c000a3d9a8911233c37c890f.asciidoc new file mode 100644 index 000000000..4a993773e --- /dev/null +++ b/docs/doc_examples/9a02bd47c000a3d9a8911233c37c890f.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + date: "2015-10-01T00:30:00Z", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + date: "2015-10-01T01:30:00Z", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + by_day: { + date_histogram: { + field: "date", + calendar_interval: "day", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/9a036a792be1d39af9fd0d1adb5f3402.asciidoc b/docs/doc_examples/9a036a792be1d39af9fd0d1adb5f3402.asciidoc new file mode 100644 index 000000000..fda0186bc --- /dev/null +++ b/docs/doc_examples/9a036a792be1d39af9fd0d1adb5f3402.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "keep", + keep_words: ["dog", "elephant", "fox"], + }, + ], + text: "the quick fox jumps over the lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a05cc10eea1251e23b82a4549913536.asciidoc b/docs/doc_examples/9a05cc10eea1251e23b82a4549913536.asciidoc new file mode 100644 index 000000000..e86a646f7 --- /dev/null +++ b/docs/doc_examples/9a05cc10eea1251e23b82a4549913536.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", + s: "node", + h: "node,shards,disk.percent,disk.indices,disk.used", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a09d33ec11e20b6081cae882282ca60.asciidoc b/docs/doc_examples/9a09d33ec11e20b6081cae882282ca60.asciidoc new file mode 100644 index 000000000..64d3dfd53 --- /dev/null +++ b/docs/doc_examples/9a09d33ec11e20b6081cae882282ca60.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedPrivileges({ + application: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc 
b/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc new file mode 100644 index 000000000..e1311ca66 --- /dev/null +++ b/docs/doc_examples/9a203aae3e1412d919546276fb52a5ca.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "cohere-embeddings", + inference_config: { + service: "cohere", + service_settings: { + api_key: "", + model_id: "embed-english-light-v3.0", + embedding_type: "byte", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a26759ccbd338224ecaacf7c49ab08e.asciidoc b/docs/doc_examples/9a26759ccbd338224ecaacf7c49ab08e.asciidoc deleted file mode 100644 index f143ebc34..000000000 --- a/docs/doc_examples/9a26759ccbd338224ecaacf7c49ab08e.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - from: 0, - size: 10, - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc b/docs/doc_examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc new file mode 100644 index 000000000..d0eb6f8e0 --- /dev/null +++ b/docs/doc_examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + size: 10000, + query: { + geo_bounding_box: { + "my-geo-field": { + top_left: { + lat: -40.979898069620134, + lon: -45, + }, + bottom_right: { + lat: -66.51326044311186, + lon: 0, + }, + }, + }, + }, + aggregations: { + grid: { + geotile_grid: { + field: "my-geo-field", + precision: 11, + size: 65536, + bounds: { + top_left: { + lat: -40.979898069620134, + lon: -45, + }, + bottom_right: { + lat: -66.51326044311186, + lon: 0, + }, + }, + }, + }, + bounds: { + geo_bounds: { + field: "my-geo-field", + wrap_longitude: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc b/docs/doc_examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc index db81c549c..cd2f894b2 100644 --- a/docs/doc_examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc +++ b/docs/doc_examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc @@ -3,24 +3,23 @@ [source, js] ---- -const response0 = await client.index({ - index: 'metricbeat-2016.05.30', - id: '1', - refresh: true, - body: { - 'system.cpu.idle.pct': 0.908 - } -}) -console.log(response0) +const response = await client.index({ + index: "metricbeat-2016.05.30", + id: 1, + refresh: "true", + document: { + "system.cpu.idle.pct": 0.908, + }, +}); +console.log(response); const response1 = await client.index({ - index: 'metricbeat-2016.05.31', - id: '1', - refresh: true, - body: { - 'system.cpu.idle.pct': 0.105 - } -}) -console.log(response1) + index: "metricbeat-2016.05.31", + id: 1, + refresh: "true", + document: { + "system.cpu.idle.pct": 0.105, + }, +}); +console.log(response1); ---- - diff --git a/docs/doc_examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc b/docs/doc_examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc new file mode 100644 index 000000000..30ecd2bf5 --- /dev/null +++ 
b/docs/doc_examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_test_scores_2", + query: { + term: { + grad_year: "2099", + }, + }, + sort: [ + { + total_score: { + order: "desc", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9a8995fd31351045d99c78e40444c8ea.asciidoc b/docs/doc_examples/9a8995fd31351045d99c78e40444c8ea.asciidoc deleted file mode 100644 index b736725fb..000000000 --- a/docs/doc_examples/9a8995fd31351045d99c78e40444c8ea.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc b/docs/doc_examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc new file mode 100644 index 000000000..531a8e414 --- /dev/null +++ b/docs/doc_examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "product-index", + query: { + script_score: { + query: { + bool: { + filter: { + range: { + price: { + gte: 1000, + }, + }, + }, + }, + }, + script: { + source: "cosineSimilarity(params.queryVector, 'product-vector') + 1.0", + params: { + queryVector: [-0.5, 90, -10, 14.8, -156], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ad14a9d7bf2699e2d86b6a607d410c0.asciidoc b/docs/doc_examples/9ad14a9d7bf2699e2d86b6a607d410c0.asciidoc new file mode 100644 index 000000000..aa641b73c --- /dev/null +++ b/docs/doc_examples/9ad14a9d7bf2699e2d86b6a607d410c0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.get({ + name: "my_search_application", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ad38ab4d9c3983e97e8c38fec611f10.asciidoc b/docs/doc_examples/9ad38ab4d9c3983e97e8c38fec611f10.asciidoc new file mode 100644 index 000000000..f2f4ae872 --- /dev/null +++ b/docs/doc_examples/9ad38ab4d9c3983e97e8c38fec611f10.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + leader: { + seeds: ["127.0.0.1:9300"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ae268058c0ea32ef8926568e011c728.asciidoc b/docs/doc_examples/9ae268058c0ea32ef8926568e011c728.asciidoc new file mode 100644 index 000000000..185a6a226 --- /dev/null +++ b/docs/doc_examples/9ae268058c0ea32ef8926568e011c728.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/my-connector/_features", + body: { + 
features: { + document_level_security: { + enabled: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9aedc45f83e022732789e8d796f5a43c.asciidoc b/docs/doc_examples/9aedc45f83e022732789e8d796f5a43c.asciidoc new file mode 100644 index 000000000..f4f4c1e5d --- /dev/null +++ b/docs/doc_examples/9aedc45f83e022732789e8d796f5a43c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.reroute({ + commands: [ + { + move: { + index: "test", + shard: 0, + from_node: "node1", + to_node: "node2", + }, + }, + { + allocate_replica: { + index: "test", + shard: 1, + node: "node3", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc b/docs/doc_examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc new file mode 100644 index 000000000..5e9aae7b4 --- /dev/null +++ b/docs/doc_examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.geoIpStats(); +console.log(response); +---- diff --git a/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc new file mode 100644 index 000000000..e890718c7 --- /dev/null +++ b/docs/doc_examples/9afa0844883b7471883aa378a8dd10b4.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.postBehavioralAnalyticsEvent({ + collection_name: "my_analytics_collection", + event_type: "search_click", + payload: { + session: { + id: "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9", + }, + user: { + id: "5f26f01a-bbee-4202-9298-81261067abbd", + }, + search: { + query: "search term", + results: { + items: [ + { + document: { + id: "123", + index: "products", + }, + }, + ], + total_results: 10, + }, + sort: { + name: "relevance", + }, + search_application: "website", + }, + document: { + id: "123", + index: "products", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc b/docs/doc_examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc new file mode 100644 index 000000000..2f79ad0e7 --- /dev/null +++ b/docs/doc_examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/_sync_job/my-connector-sync-job-id/_cancel", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc b/docs/doc_examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc new file mode 100644 index 000000000..54a531e1a --- /dev/null +++ b/docs/doc_examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.stats(); +console.log(response); +---- diff --git a/docs/doc_examples/9b345e0bfd45f3a37194585ec9193478.asciidoc 
b/docs/doc_examples/9b345e0bfd45f3a37194585ec9193478.asciidoc new file mode 100644 index 000000000..79d2f713b --- /dev/null +++ b/docs/doc_examples/9b345e0bfd45f3a37194585ec9193478.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9b68748c061b768c0153c1f2508ce207.asciidoc b/docs/doc_examples/9b68748c061b768c0153c1f2508ce207.asciidoc new file mode 100644 index 000000000..889534c39 --- /dev/null +++ b/docs/doc_examples/9b68748c061b768c0153c1f2508ce207.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + clusterA: { + mode: "proxy", + skip_unavailable: "true", + server_name: "clustera.es.region-a.gcp.elastic-cloud.com", + proxy_socket_connections: "18", + proxy_address: "clustera.es.region-a.gcp.elastic-cloud.com:9400", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9b92266d87170e93a84f9700596d9035.asciidoc b/docs/doc_examples/9b92266d87170e93a84f9700596d9035.asciidoc new file mode 100644 index 000000000..b9c2e1dfe --- /dev/null +++ b/docs/doc_examples/9b92266d87170e93a84f9700596d9035.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + location: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "example", + refresh: "true", + document: { + name: "Wind & Wetter, Berlin, Germany", + location: { + type: "point", + coordinates: [13.400544, 52.530286], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/9ba6f1e64c1dfff5aac26eaa1d093f48.asciidoc b/docs/doc_examples/9ba6f1e64c1dfff5aac26eaa1d093f48.asciidoc new file mode 100644 index 000000000..c124140d0 --- /dev/null +++ b/docs/doc_examples/9ba6f1e64c1dfff5aac26eaa1d093f48.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["lowercase", "custom_stems", "porter_stem"], + }, + }, + filter: { + custom_stems: { + type: "stemmer_override", + rules: ["running, runs => run", "stemmer => stemmer"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ba868784f417a8d3679b3c8ed5939ad.asciidoc b/docs/doc_examples/9ba868784f417a8d3679b3c8ed5939ad.asciidoc new file mode 100644 index 000000000..2a5963d6d --- /dev/null +++ b/docs/doc_examples/9ba868784f417a8d3679b3c8ed5939ad.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_size: "100gb", + }, + }, + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/9bae72e974bdeb56007d9104e73eff92.asciidoc b/docs/doc_examples/9bae72e974bdeb56007d9104e73eff92.asciidoc new file mode 100644 index 000000000..9e58839bd --- /dev/null +++ b/docs/doc_examples/9bae72e974bdeb56007d9104e73eff92.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: "ctx._source.remove('new_field')", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc b/docs/doc_examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc new file mode 100644 index 000000000..5486c20ed --- /dev/null +++ b/docs/doc_examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + index: "my-index-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc b/docs/doc_examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc new file mode 100644 index 000000000..2a2b518d4 --- /dev/null +++ b/docs/doc_examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: ".elastic-connectors", + id: "connector_id", + doc: { + configuration: { + tables: { + type: "list", + value: "*", + }, + ssl_enabled: { + type: "bool", + value: false, + }, + ssl_ca: { + type: "str", + value: "", + }, + fetch_size: { + type: "int", + value: 50, + }, + retry_count: { + type: "int", + value: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc b/docs/doc_examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc new file mode 100644 index 000000000..87d69814b --- /dev/null +++ b/docs/doc_examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": "drm3btev3e86", + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9bfdda207b701028a3439e495e800c02.asciidoc b/docs/doc_examples/9bfdda207b701028a3439e495e800c02.asciidoc new file mode 100644 index 000000000..287a69f92 --- /dev/null +++ b/docs/doc_examples/9bfdda207b701028a3439e495e800c02.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + calendar_interval: "1M", + format: "yyyy-MM-dd", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc new file mode 100644 index 000000000..9000232a8 --- /dev/null +++ b/docs/doc_examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc @@ 
-0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcAuthenticate({ + redirect_uri: + "https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + state: "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + nonce: "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM", + realm: "oidc1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc b/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc new file mode 100644 index 000000000..ed4557890 --- /dev/null +++ b/docs/doc_examples/9c021836acf7c0370e289f611325868d.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateConfiguration({ + connector_id: "my-spo-connector", + values: { + tenant_id: "my-tenant-id", + tenant_name: "my-sharepoint-site", + client_id: "foo", + secret_value: "bar", + site_collections: "*", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c2ce0132e4527077443f007d27b1158.asciidoc b/docs/doc_examples/9c2ce0132e4527077443f007d27b1158.asciidoc new file mode 100644 index 000000000..a85d946d2 --- /dev/null +++ b/docs/doc_examples/9c2ce0132e4527077443f007d27b1158.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + flattened: { + type: "flattened", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + flattened: { + field: ["foo"], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/9c4ac64e73141f6cbf2fb6da0743d9b7.asciidoc b/docs/doc_examples/9c4ac64e73141f6cbf2fb6da0743d9b7.asciidoc new file mode 100644 index 000000000..6de7fa524 --- /dev/null +++ b/docs/doc_examples/9c4ac64e73141f6cbf2fb6da0743d9b7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: { + query: "Quick foxes", + analyzer: "stop", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c5cbbdbe0075ab9c2611627fe4748fb.asciidoc b/docs/doc_examples/9c5cbbdbe0075ab9c2611627fe4748fb.asciidoc new file mode 100644 index 000000000..221cdeaa1 --- /dev/null +++ b/docs/doc_examples/9c5cbbdbe0075ab9c2611627fe4748fb.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "decimal_digit_example", + settings: { + analysis: { + analyzer: { + whitespace_decimal_digit: { + tokenizer: "whitespace", + filter: ["decimal_digit"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c6ea5fe2339d6c7e5e4bf1b98990248.asciidoc 
b/docs/doc_examples/9c6ea5fe2339d6c7e5e4bf1b98990248.asciidoc new file mode 100644 index 000000000..f1d5306fe --- /dev/null +++ b/docs/doc_examples/9c6ea5fe2339d6c7e5e4bf1b98990248.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + knn: { + field: "image-vector", + query_vector: [-5, 9, -12], + k: 10, + num_candidates: 100, + }, + fields: ["title", "file-type"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9c7c8051592b6af3adb5d7c490849068.asciidoc b/docs/doc_examples/9c7c8051592b6af3adb5d7c490849068.asciidoc new file mode 100644 index 000000000..38e4630c1 --- /dev/null +++ b/docs/doc_examples/9c7c8051592b6af3adb5d7c490849068.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDatafeed({ + datafeed_id: "datafeed-test-job", + pretty: "true", + indices: ["kibana_sample_data_logs"], + query: { + bool: { + must: [ + { + match_all: {}, + }, + ], + }, + }, + job_id: "test-job", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc b/docs/doc_examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc new file mode 100644 index 000000000..d842bf3e9 --- /dev/null +++ b/docs/doc_examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + filter_path: "*.settings.index.routing.allocation.include._tier_preference", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc b/docs/doc_examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc new file mode 100644 index 000000000..2f7852343 --- /dev/null +++ b/docs/doc_examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "model2", + docs: [ + { + text_field: "This is a very happy person", + }, + ], + inference_config: { + zero_shot_classification: { + labels: ["glad", "sad", "bad", "rad"], + multi_label: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc b/docs/doc_examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc new file mode 100644 index 000000000..25b3a01c8 --- /dev/null +++ b/docs/doc_examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc b/docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc new file mode 100644 index 000000000..1bc8b2cc7 --- /dev/null +++ b/docs/doc_examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc @@ -0,0 
+1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-byte", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + element_type: "byte", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-rank-vectors-byte", + id: 1, + document: { + my_vector: [ + [1, 2, 3], + [4, 5, 6], + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc b/docs/doc_examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc new file mode 100644 index 000000000..4c9ea62c8 --- /dev/null +++ b/docs/doc_examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + watch: { + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + search: { + request: { + indices: ["logs"], + body: { + query: { + match: { + message: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + log_error: { + logging: { + text: "Found {{ctx.payload.hits.total}} errors in the logs", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc b/docs/doc_examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc new file mode 100644 index 000000000..9944c2a27 --- /dev/null +++ b/docs/doc_examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", + action_modes: { + _all: "force_execute", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9cfbc41bb7b6fbdb26550dd2789c274e.asciidoc b/docs/doc_examples/9cfbc41bb7b6fbdb26550dd2789c274e.asciidoc new file mode 100644 index 000000000..72fdaf837 --- /dev/null +++ b/docs/doc_examples/9cfbc41bb7b6fbdb26550dd2789c274e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + refresh: "true", + slices: 5, + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d1fb129ac783355a20097effded1845.asciidoc b/docs/doc_examples/9d1fb129ac783355a20097effded1845.asciidoc new file mode 100644 index 000000000..edbed6a51 --- /dev/null +++ b/docs/doc_examples/9d1fb129ac783355a20097effded1845.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "test", + refresh: "true", + operations: [ + { + index: {}, + }, + { + s: 1, + m: 3.1415, + }, + { + index: {}, + }, + { + s: 2, + m: 1, + }, + { + index: {}, + }, + { + s: 3, + m: 2.71828, + }, + ], +}); +console.log(response); + +const response1 = await client.search({ + index: "test", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: { + 
field: "m", + }, + sort: { + s: "desc", + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/9d31c7eaf8c6b56cee2fdfdde8a442bb.asciidoc b/docs/doc_examples/9d31c7eaf8c6b56cee2fdfdde8a442bb.asciidoc new file mode 100644 index 000000000..047504f75 --- /dev/null +++ b/docs/doc_examples/9d31c7eaf8c6b56cee2fdfdde8a442bb.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + shrink: { + max_primary_shard_size: "50gb", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d396afad93782699d7a929578c85284.asciidoc b/docs/doc_examples/9d396afad93782699d7a929578c85284.asciidoc new file mode 100644 index 000000000..2f0a85e90 --- /dev/null +++ b/docs/doc_examples/9d396afad93782699d7a929578c85284.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "google_vertex_ai_embeddings", + inference_config: { + service: "googlevertexai", + service_settings: { + service_account_json: "", + model_id: "text-embedding-004", + location: "", + project_id: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d461ae140ddc018efd2650559800cd1.asciidoc b/docs/doc_examples/9d461ae140ddc018efd2650559800cd1.asciidoc new file mode 100644 index 000000000..13f8b9c99 --- /dev/null +++ b/docs/doc_examples/9d461ae140ddc018efd2650559800cd1.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + allocate: { + number_of_replicas: 1, + require: { + box_type: "cold", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d5855075e7008270459cc88c189043d.asciidoc b/docs/doc_examples/9d5855075e7008270459cc88c189043d.asciidoc new file mode 100644 index 000000000..bd11081bd --- /dev/null +++ b/docs/doc_examples/9d5855075e7008270459cc88c189043d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "cross-cluster-user", + password: "l0ng-r4nd0m-p@ssw0rd", + roles: ["remote-replication"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d662fc9f943c287b7144f5e4e2ae358.asciidoc b/docs/doc_examples/9d662fc9f943c287b7144f5e4e2ae358.asciidoc new file mode 100644 index 000000000..01c5e240c --- /dev/null +++ b/docs/doc_examples/9d662fc9f943c287b7144f5e4e2ae358.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "reviews", + size: 0, + aggs: { + review_variability: { + median_absolute_deviation: { + field: "rating", + compression: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc 
b/docs/doc_examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc new file mode 100644 index 000000000..8c361f48c --- /dev/null +++ b/docs/doc_examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + pretty: "true", + human: "true", + detailed: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d67db8370a98854812d38ae73ee2a12.asciidoc b/docs/doc_examples/9d67db8370a98854812d38ae73ee2a12.asciidoc new file mode 100644 index 000000000..357b432b0 --- /dev/null +++ b/docs/doc_examples/9d67db8370a98854812d38ae73ee2a12.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index2", + query: { + query_string: { + query: "running with scissors", + fields: ["comment", "comment.english"], + }, + }, + highlight: { + order: "score", + fields: { + comment: { + type: "fvh", + matched_fields: ["comment", "comment.english"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc b/docs/doc_examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc new file mode 100644 index 000000000..08ce23ec8 --- /dev/null +++ b/docs/doc_examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.healthReport(); +console.log(response); +---- diff --git a/docs/doc_examples/9d9c8d715b72ce336e604c2c8a2b540e.asciidoc b/docs/doc_examples/9d9c8d715b72ce336e604c2c8a2b540e.asciidoc new file mode 100644 index 000000000..1cc661947 --- /dev/null +++ b/docs/doc_examples/9d9c8d715b72ce336e604c2c8a2b540e.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + total_sales: { + sum: { + field: "price", + }, + }, + sales_bucket_sort: { + bucket_sort: { + sort: [ + { + total_sales: { + order: "desc", + }, + }, + ], + size: 3, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9de10a59a5f56dd0906be627896cc789.asciidoc b/docs/doc_examples/9de10a59a5f56dd0906be627896cc789.asciidoc new file mode 100644 index 000000000..7e05930ac --- /dev/null +++ b/docs/doc_examples/9de10a59a5f56dd0906be627896cc789.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "bicycles,other_cycles", + query: { + match: { + description: "dutch", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9de4704d2f047dae1259249112488697.asciidoc b/docs/doc_examples/9de4704d2f047dae1259249112488697.asciidoc new file mode 100644 index 000000000..68b58c208 --- /dev/null +++ b/docs/doc_examples/9de4704d2f047dae1259249112488697.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_backup", + repository: { + type: "azure", + settings: { + client: "secondary", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9de4ea9d5f3d427a71ee07d998cb5611.asciidoc b/docs/doc_examples/9de4ea9d5f3d427a71ee07d998cb5611.asciidoc new file mode 100644 index 000000000..f22be9ab8 --- /dev/null +++ b/docs/doc_examples/9de4ea9d5f3d427a71ee07d998cb5611.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.addBlock({ + index: "my-index-000001", + block: "write", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc b/docs/doc_examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc new file mode 100644 index 000000000..f5015c526 --- /dev/null +++ b/docs/doc_examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\n FROM library\n | EVAL year = DATE_EXTRACT("year", release_date)\n | WHERE page_count > ?1 AND author == ?2\n | STATS count = COUNT(*) by year\n | WHERE count > ?3\n | LIMIT 5\n ', + params: [300, "Frank Herbert", 0], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e0e3ce27967f164f4585c5231ba9c75.asciidoc b/docs/doc_examples/9e0e3ce27967f164f4585c5231ba9c75.asciidoc new file mode 100644 index 000000000..cbe2f22fa --- /dev/null +++ b/docs/doc_examples/9e0e3ce27967f164f4585c5231ba9c75.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + my_field: "quick brown fox jump lazy dog", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e3c28d5820c38ea117eb2e9a5061089.asciidoc b/docs/doc_examples/9e3c28d5820c38ea117eb2e9a5061089.asciidoc new file mode 100644 index 000000000..8b5d51237 --- /dev/null +++ b/docs/doc_examples/9e3c28d5820c38ea117eb2e9a5061089.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + sigmoid: { + pivot: 7, + exponent: 0.6, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc b/docs/doc_examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc new file mode 100644 index 000000000..0baae0b73 --- /dev/null +++ b/docs/doc_examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.putSynonym({ + id: "my-synonyms-set", + synonyms_set: [ + { + id: "test-1", + synonyms: "hello, hi", + }, + { + synonyms: "bye, goodbye", + }, + { + id: "test-2", + synonyms: "test => check", + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/9e56d79ad9a02b642c361f0b85dd95d7.asciidoc b/docs/doc_examples/9e56d79ad9a02b642c361f0b85dd95d7.asciidoc deleted file mode 100644 index ca673d252..000000000 --- a/docs/doc_examples/9e56d79ad9a02b642c361f0b85dd95d7.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - color: { - type: 'keyword' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/9e5ae957fd0663662bfbed9d1effe99e.asciidoc b/docs/doc_examples/9e5ae957fd0663662bfbed9d1effe99e.asciidoc new file mode 100644 index 000000000..51c64f7e8 --- /dev/null +++ b/docs/doc_examples/9e5ae957fd0663662bfbed9d1effe99e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "Set '_routing' to 'geoip.country_iso_code' value", + field: "_routing", + value: "{{{geoip.country_iso_code}}}", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e962baf1fb407c21d6c47dcd37cec29.asciidoc b/docs/doc_examples/9e962baf1fb407c21d6c47dcd37cec29.asciidoc new file mode 100644 index 000000000..891d78d79 --- /dev/null +++ b/docs/doc_examples/9e962baf1fb407c21d6c47dcd37cec29.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + message: "{{query_string}}", + }, + }, + from: "{{from}}{{^from}}0{{/from}}", + size: "{{size}}{{^size}}10{{/size}}", + }, + params: { + query_string: "hello world", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc b/docs/doc_examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc new file mode 100644 index 000000000..5b700034d --- /dev/null +++ b/docs/doc_examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + bytes: "b", + s: "store.size:desc,index:asc", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9eda9c39428b0c2c53cbd8ee7ae0f888.asciidoc b/docs/doc_examples/9eda9c39428b0c2c53cbd8ee7ae0f888.asciidoc new file mode 100644 index 000000000..960e07320 --- /dev/null +++ b/docs/doc_examples/9eda9c39428b0c2c53cbd8ee7ae0f888.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlAuthenticate({ + content: + "PHNhbWxwOlJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6cHJvdG9jb2wiIHhtbG5zOnNhbWw9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMD.....", + ids: [], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9eef31d85ebaf6c27054d7375715dbe0.asciidoc b/docs/doc_examples/9eef31d85ebaf6c27054d7375715dbe0.asciidoc new file mode 100644 index 000000000..3855ad705 --- /dev/null +++ 
b/docs/doc_examples/9eef31d85ebaf6c27054d7375715dbe0.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_event_watch", + trigger: { + schedule: { + interval: "5m", + }, + }, + input: { + search: { + request: { + indices: "log-events", + body: { + query: { + match: { + status: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + log_hits: { + foreach: "ctx.payload.hits.hits", + max_iterations: 500, + logging: { + text: "Found id {{ctx.payload._id}} with field {{ctx.payload._source.my_field}}", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f04cc1a0c6cdb3ed2247f1399713767.asciidoc b/docs/doc_examples/9f04cc1a0c6cdb3ed2247f1399713767.asciidoc new file mode 100644 index 000000000..98a85f208 --- /dev/null +++ b/docs/doc_examples/9f04cc1a0c6cdb3ed2247f1399713767.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + tags: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f0a0029982d9b3423a2a3de1f1b5136.asciidoc b/docs/doc_examples/9f0a0029982d9b3423a2a3de1f1b5136.asciidoc new file mode 100644 index 000000000..8beac4d25 --- /dev/null +++ b/docs/doc_examples/9f0a0029982d9b3423a2a3de1f1b5136.asciidoc @@ -0,0 +1,98 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (491.2350 5237.4081)", + city: "Amsterdam", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (490.1618 5236.9219)", + city: "Amsterdam", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (491.4722 5237.1667)", + city: "Amsterdam", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (440.5200 5122.2900)", + city: "Antwerp", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (233.6389 4886.1111)", + city: "Paris", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (232.7000 4886.0000)", + city: "Paris", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggs: { + centroid: { + cartesian_centroid: { + field: "location", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/9f22a0920cc763eefa233ced963d9624.asciidoc b/docs/doc_examples/9f22a0920cc763eefa233ced963d9624.asciidoc new file mode 100644 index 000000000..873439e01 --- /dev/null +++ b/docs/doc_examples/9f22a0920cc763eefa233ced963d9624.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.search({ + query: { + span_term: { + "user.id": { + term: "kimchy", + boost: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f286416f1b18940f13cb27ab5c8458e.asciidoc b/docs/doc_examples/9f286416f1b18940f13cb27ab5c8458e.asciidoc new file mode 100644 index 000000000..b3b8507ef --- /dev/null +++ b/docs/doc_examples/9f286416f1b18940f13cb27ab5c8458e.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + filter: ["my_pattern_replace_filter"], + }, + }, + filter: { + my_pattern_replace_filter: { + type: "pattern_replace", + pattern: "[£|€]", + replacement: "", + all: false, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc b/docs/doc_examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc new file mode 100644 index 000000000..173a6c38c --- /dev/null +++ b/docs/doc_examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "job-candidates", + query: { + terms_set: { + programming_languages: { + terms: ["c++", "java", "php"], + minimum_should_match_script: { + source: "Math.min(params.num_terms, doc['required_matches'].value)", + }, + boost: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f66b5243050f71ed51bc787a7ac1218.asciidoc b/docs/doc_examples/9f66b5243050f71ed51bc787a7ac1218.asciidoc new file mode 100644 index 000000000..7570912eb --- /dev/null +++ b/docs/doc_examples/9f66b5243050f71ed51bc787a7ac1218.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "index2", + refresh: "true", + operations: [ + { + index: { + _id: "doc1", + }, + }, + { + comment: "run with scissors", + }, + { + index: { + _id: "doc2", + }, + }, + { + comment: "running with scissors", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f7671119236423e0e40801ef6485af1.asciidoc b/docs/doc_examples/9f7671119236423e0e40801ef6485af1.asciidoc new file mode 100644 index 000000000..0a1a681c9 --- /dev/null +++ b/docs/doc_examples/9f7671119236423e0e40801ef6485af1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["uppercase"], + text: "the Quick FoX JUMPs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc b/docs/doc_examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc new file mode 100644 index 000000000..7d840f8ed --- /dev/null +++ b/docs/doc_examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateDataFrameAnalytics({ + 
id: "loganalytics", + model_memory_limit: "200mb", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9fa55fc76ec4bd81f372e9389f1da851.asciidoc b/docs/doc_examples/9fa55fc76ec4bd81f372e9389f1da851.asciidoc new file mode 100644 index 000000000..96753281c --- /dev/null +++ b/docs/doc_examples/9fa55fc76ec4bd81f372e9389f1da851.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-data-stream", + settings: { + index: { + refresh_interval: "30s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9fda516a5dc60ba477b970eaad4429db.asciidoc b/docs/doc_examples/9fda516a5dc60ba477b970eaad4429db.asciidoc new file mode 100644 index 000000000..00673e725 --- /dev/null +++ b/docs/doc_examples/9fda516a5dc60ba477b970eaad4429db.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataLifecycle({ + name: "my-data-stream*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9feff356f302ea4915347ab71cc4887a.asciidoc b/docs/doc_examples/9feff356f302ea4915347ab71cc4887a.asciidoc new file mode 100644 index 000000000..28f39bf5a --- /dev/null +++ b/docs/doc_examples/9feff356f302ea4915347ab71cc4887a.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.simulateTemplate({ + index_patterns: ["my-index-*"], + composed_of: ["ct2"], + priority: 10, + template: { + settings: { + "index.number_of_replicas": 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc b/docs/doc_examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc new file mode 100644 index 000000000..972ad5d5e --- /dev/null +++ b/docs/doc_examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.closePointInTime({ + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", +}); +console.log(response); +---- diff --git a/docs/doc_examples/9ffe41322c095af1b6ea45a79b640a6f.asciidoc b/docs/doc_examples/9ffe41322c095af1b6ea45a79b640a6f.asciidoc new file mode 100644 index 000000000..7742b99dd --- /dev/null +++ b/docs/doc_examples/9ffe41322c095af1b6ea45a79b640a6f.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_within: { + little: { + span_term: { + field1: "foo", + }, + }, + big: { + span_near: { + clauses: [ + { + span_term: { + field1: "bar", + }, + }, + { + span_term: { + field1: "baz", + }, + }, + ], + slop: 5, + in_order: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc b/docs/doc_examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc new file mode 100644 index 000000000..df7650ac2 --- /dev/null +++ 
b/docs/doc_examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getRollupCaps({ + id: "sensor-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a008f42379930edc354b4074e0a33344.asciidoc b/docs/doc_examples/a008f42379930edc354b4074e0a33344.asciidoc new file mode 100644 index 000000000..3f2438447 --- /dev/null +++ b/docs/doc_examples/a008f42379930edc354b4074e0a33344.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "index", + id: 1, + document: { + designation: "spoon", + price: 13, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a01753fa7b4ba6dc19054f4f42d91cd9.asciidoc b/docs/doc_examples/a01753fa7b4ba6dc19054f4f42d91cd9.asciidoc new file mode 100644 index 000000000..9461dd6d9 --- /dev/null +++ b/docs/doc_examples/a01753fa7b4ba6dc19054f4f42d91cd9.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: + '{ "query": { "bool": { "filter": [ { "range": { "@timestamp": { "gte": {{#year_scope}} "now-1y/d" {{/year_scope}} {{^year_scope}} "now-1d/d" {{/year_scope}} , "lt": "now/d" }}}, { "term": { "user.id": "{{user_id}}" }}]}}}', + params: { + year_scope: true, + user_id: "kimchy", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a037beb3d02296e1d36dd43ef5c935dd.asciidoc b/docs/doc_examples/a037beb3d02296e1d36dd43ef5c935dd.asciidoc new file mode 100644 index 000000000..c584944c7 --- /dev/null +++ b/docs/doc_examples/a037beb3d02296e1d36dd43ef5c935dd.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["keyword_repeat"], + text: "fox running and jumping", + explain: true, + attributes: "keyword", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0497157fdefecd04e597edb800a1a95.asciidoc b/docs/doc_examples/a0497157fdefecd04e597edb800a1a95.asciidoc new file mode 100644 index 000000000..2c2bb2bdd --- /dev/null +++ b/docs/doc_examples/a0497157fdefecd04e597edb800a1a95.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + _source: "obj.*", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a04a8d90f8245ff5f30a9983909faa1d.asciidoc b/docs/doc_examples/a04a8d90f8245ff5f30a9983909faa1d.asciidoc new file mode 100644 index 000000000..a035737b6 --- /dev/null +++ b/docs/doc_examples/a04a8d90f8245ff5f30a9983909faa1d.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_queries1", + settings: { + analysis: { + analyzer: { + wildcard_prefix: { + type: "custom", + tokenizer: "standard", + filter: 
["lowercase", "wildcard_edge_ngram"], + }, + }, + filter: { + wildcard_edge_ngram: { + type: "edge_ngram", + min_gram: 1, + max_gram: 32, + }, + }, + }, + }, + mappings: { + properties: { + query: { + type: "percolator", + }, + my_field: { + type: "text", + fields: { + prefix: { + type: "text", + analyzer: "wildcard_prefix", + search_analyzer: "standard", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0871be90badeecd2f8d8ec90230e248.asciidoc b/docs/doc_examples/a0871be90badeecd2f8d8ec90230e248.asciidoc new file mode 100644 index 000000000..288ddcba7 --- /dev/null +++ b/docs/doc_examples/a0871be90badeecd2f8d8ec90230e248.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + char_filter: ["my_char_filter"], + filter: ["lowercase"], + }, + }, + char_filter: { + my_char_filter: { + type: "pattern_replace", + pattern: "(?<=\\p{Lower})(?=\\p{Upper})", + replacement: " ", + }, + }, + }, + }, + mappings: { + properties: { + text: { + type: "text", + analyzer: "my_analyzer", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "The fooBarBaz method", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a0a7557bb7e2aff7918557cd648f41af.asciidoc b/docs/doc_examples/a0a7557bb7e2aff7918557cd648f41af.asciidoc new file mode 100644 index 000000000..036309628 --- /dev/null +++ b/docs/doc_examples/a0a7557bb7e2aff7918557cd648f41af.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + aggs: { + price_ranges: { + range: { + field: "price", + ranges: [ + { + to: 10, + }, + { + from: 10, + to: 100, + }, + { + from: 100, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0c64894f14d28b7e0c902add71d2e9a.asciidoc b/docs/doc_examples/a0c64894f14d28b7e0c902add71d2e9a.asciidoc new file mode 100644 index 000000000..de2db6d50 --- /dev/null +++ b/docs/doc_examples/a0c64894f14d28b7e0c902add71d2e9a.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "xpack.profiling.templates.enabled": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0c868282c0514a342ad04998cdc2175.asciidoc b/docs/doc_examples/a0c868282c0514a342ad04998cdc2175.asciidoc new file mode 100644 index 000000000..500a4aa86 --- /dev/null +++ b/docs/doc_examples/a0c868282c0514a342ad04998cdc2175.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + conflicts: "proceed", + query: { + match_all: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0d53dcb3df938fc0a01d248571a41e4.asciidoc b/docs/doc_examples/a0d53dcb3df938fc0a01d248571a41e4.asciidoc new file mode 100644 index 
000000000..d8c564f74 --- /dev/null +++ b/docs/doc_examples/a0d53dcb3df938fc0a01d248571a41e4.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + runtime_mappings: { + "price.discounted": { + type: "double", + script: + "\n double price = doc['price'].value;\n if (doc['product'].value == 'mad max') {\n price *= 0.8;\n }\n emit(price);\n ", + }, + }, + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + price: { + histogram: { + interval: 5, + field: "price.discounted", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a0f4e902d18460337684d74ea932fbe9.asciidoc b/docs/doc_examples/a0f4e902d18460337684d74ea932fbe9.asciidoc new file mode 100644 index 000000000..8e8910255 --- /dev/null +++ b/docs/doc_examples/a0f4e902d18460337684d74ea932fbe9.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + doc: { + name: "new_name", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1070cf2f5969d42d71cda057223f152.asciidoc b/docs/doc_examples/a1070cf2f5969d42d71cda057223f152.asciidoc new file mode 100644 index 000000000..945dcb537 --- /dev/null +++ b/docs/doc_examples/a1070cf2f5969d42d71cda057223f152.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a116949e446f34dc25ae57d4b703d0c1.asciidoc b/docs/doc_examples/a116949e446f34dc25ae57d4b703d0c1.asciidoc deleted file mode 100644 index ddb66dfb9..000000000 --- a/docs/doc_examples/a116949e446f34dc25ae57d4b703d0c1.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - range: { - age: { - gte: 10, - lte: 20, - boost: 2 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a1377b32d7fe3680079ae0df73009b0e.asciidoc b/docs/doc_examples/a1377b32d7fe3680079ae0df73009b0e.asciidoc new file mode 100644 index 000000000..619608a67 --- /dev/null +++ b/docs/doc_examples/a1377b32d7fe3680079ae0df73009b0e.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + query: { + term: { + tags: "car", + }, + }, + aggs: { + by_sale: { + nested: { + path: "comments", + }, + aggs: { + by_user: { + terms: { + field: "comments.username", + size: 1, + }, + aggs: { + by_nested: { + top_hits: {}, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1490f71d705053951870fd2d3bceb39.asciidoc b/docs/doc_examples/a1490f71d705053951870fd2d3bceb39.asciidoc new file mode 100644 index 000000000..19c475da7 --- /dev/null +++ b/docs/doc_examples/a1490f71d705053951870fd2d3bceb39.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + session_data: { + type: "object", + enabled: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "session_1", + document: { + session_data: "foo bar", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a159143bb578403bb9c7ff37d635d7ad.asciidoc b/docs/doc_examples/a159143bb578403bb9c7ff37d635d7ad.asciidoc new file mode 100644 index 000000000..2f4c0b3b7 --- /dev/null +++ b/docs/doc_examples/a159143bb578403bb9c7ff37d635d7ad.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "predicate_token_filter", + script: { + source: "\n token.term.length() > 3\n ", + }, + }, + ], + text: "the fox jumps the lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc b/docs/doc_examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc new file mode 100644 index 000000000..668b41dfa --- /dev/null +++ b/docs/doc_examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getLifecycle(); +console.log(response); +---- diff --git a/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc new file mode 100644 index 000000000..2979e9a90 --- /dev/null +++ b/docs/doc_examples/a162eb50853331c80596f5994e9d1c38.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.renderQuery({ + name: "my_search_application", + params: { + query_string: "rock climbing", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a180c97f8298fb2388fdcaf7b2e1b81e.asciidoc b/docs/doc_examples/a180c97f8298fb2388fdcaf7b2e1b81e.asciidoc new file mode 100644 index 000000000..d2545c390 --- /dev/null +++ b/docs/doc_examples/a180c97f8298fb2388fdcaf7b2e1b81e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "nightly-snapshots", + schedule: "0 30 2 * * ?", + name: "<nightly-snap-{now/d}>", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + feature_states: ["kibana", "security"], + }, + retention: { + expire_after: "30d", + min_count: 5, + max_count: 50, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1879930c1dac36a57d7f094a680420b.asciidoc b/docs/doc_examples/a1879930c1dac36a57d7f094a680420b.asciidoc new file mode 100644 index 000000000..f7f63fea4 --- /dev/null +++ b/docs/doc_examples/a1879930c1dac36a57d7f094a680420b.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "zoomed-in": { + filter: { 
+ geo_bounding_box: { + location: { + top_left: "u17", + bottom_right: "u17", + }, + }, + }, + aggregations: { + zoom1: { + geohash_grid: { + field: "location", + precision: 8, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a197076e0e74951ea88f20309ec257e2.asciidoc b/docs/doc_examples/a197076e0e74951ea88f20309ec257e2.asciidoc new file mode 100644 index 000000000..9b7a0457a --- /dev/null +++ b/docs/doc_examples/a197076e0e74951ea88f20309ec257e2.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "palindrome_list", + settings: { + analysis: { + analyzer: { + whitespace_reverse_first_token: { + tokenizer: "whitespace", + filter: ["reverse_first_token"], + }, + }, + filter: { + reverse_first_token: { + type: "condition", + filter: ["reverse"], + script: { + source: "token.getPosition() === 0", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1acf454bd6477183ce27ace872deb46.asciidoc b/docs/doc_examples/a1acf454bd6477183ce27ace872deb46.asciidoc new file mode 100644 index 000000000..7bae40744 --- /dev/null +++ b/docs/doc_examples/a1acf454bd6477183ce27ace872deb46.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role7", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["a.*"], + except: ["a.b*"], + }, + }, + ], +}); +console.log(response); + +const response1 = await client.security.putRole({ + name: "test_role8", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["a.b*"], + except: ["a.b.c*"], + }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a1b668795243398f5bc40bcc9bead884.asciidoc b/docs/doc_examples/a1b668795243398f5bc40bcc9bead884.asciidoc new file mode 100644 index 000000000..31d504f3c --- /dev/null +++ b/docs/doc_examples/a1b668795243398f5bc40bcc9bead884.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + my_range: { + type: "long_range", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + my_range: [ + { + gte: 200, + lte: 300, + }, + { + gte: 1, + lte: 100, + }, + { + gte: 200, + lte: 300, + }, + { + gte: 200, + lte: 500, + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc b/docs/doc_examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc new file mode 100644 index 000000000..4d6375dbb --- /dev/null +++ b/docs/doc_examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1d0603b24a5b048f0959975d8057534.asciidoc 
b/docs/doc_examples/a1d0603b24a5b048f0959975d8057534.asciidoc new file mode 100644 index 000000000..0d951cbb3 --- /dev/null +++ b/docs/doc_examples/a1d0603b24a5b048f0959975d8057534.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", + doc: { + fullname: "John Doe", + text: "test test test", + }, + fields: ["fullname"], + per_field_analyzer: { + fullname: "keyword", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1db5c822745fe167e9ef854dca3d129.asciidoc b/docs/doc_examples/a1db5c822745fe167e9ef854dca3d129.asciidoc deleted file mode 100644 index f15cf7bf7..000000000 --- a/docs/doc_examples/a1db5c822745fe167e9ef854dca3d129.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': 'drm3btev3e86', - order: 'asc', - unit: 'km' - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a1dcc6668d13271c8207ff5ff1d35492.asciidoc b/docs/doc_examples/a1dcc6668d13271c8207ff5ff1d35492.asciidoc new file mode 100644 index 000000000..47f0b8b89 --- /dev/null +++ b/docs/doc_examples/a1dcc6668d13271c8207ff5ff1d35492.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc b/docs/doc_examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc new file mode 100644 index 000000000..fffbd6549 --- /dev/null +++ b/docs/doc_examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + match_phrase: { + text: "landmark in Paris", + }, + }, + }, + }, + field: "text", + inference_id: "my-cohere-rerank-model", + inference_text: "Most famous landmark in Paris", + rank_window_size: 100, + min_score: 0.5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1e5884051755b5a5f4d7549f319f4c7.asciidoc b/docs/doc_examples/a1e5884051755b5a5f4d7549f319f4c7.asciidoc new file mode 100644 index 000000000..1442efa7b --- /dev/null +++ b/docs/doc_examples/a1e5884051755b5a5f4d7549f319f4c7.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "products", + mappings: { + properties: { + resellers: { + type: "nested", + properties: { + reseller: { + type: "keyword", + }, + price: { + type: "double", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1e5f3956f9a697e79478fc9a6e30e1f.asciidoc b/docs/doc_examples/a1e5f3956f9a697e79478fc9a6e30e1f.asciidoc new file mode 100644 index 000000000..3ffa26e73 --- /dev/null 
+++ b/docs/doc_examples/a1e5f3956f9a697e79478fc9a6e30e1f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "thai", + text: "การที่ได้ต้องแสดงว่างานดี", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a1f70bc71b763b58206814c40a7440e7.asciidoc b/docs/doc_examples/a1f70bc71b763b58206814c40a7440e7.asciidoc new file mode 100644 index 000000000..e8b0d8042 --- /dev/null +++ b/docs/doc_examples/a1f70bc71b763b58206814c40a7440e7.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_watcher/settings", + body: { + "index.auto_expand_replicas": "0-4", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a21319c9eff1ac47d7fe7490f1ef2efa.asciidoc b/docs/doc_examples/a21319c9eff1ac47d7fe7490f1ef2efa.asciidoc new file mode 100644 index 000000000..51e111829 --- /dev/null +++ b/docs/doc_examples/a21319c9eff1ac47d7fe7490f1ef2efa.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["decimal_digit"], + text: "१-one two-२ ३", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a21a7bf052b41f5b996dc58f7b69770f.asciidoc b/docs/doc_examples/a21a7bf052b41f5b996dc58f7b69770f.asciidoc new file mode 100644 index 000000000..3d5c6a62d --- /dev/null +++ b/docs/doc_examples/a21a7bf052b41f5b996dc58f7b69770f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.setUpgradeMode({ + enabled: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a253a1712953f7292bdd646c48ec7fd2.asciidoc b/docs/doc_examples/a253a1712953f7292bdd646c48ec7fd2.asciidoc new file mode 100644 index 000000000..7ef15ed42 --- /dev/null +++ b/docs/doc_examples/a253a1712953f7292bdd646c48ec7fd2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + sort: "@timestamp:desc", + size: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a28111cdd9b5aaea96c779cbfbf38780.asciidoc b/docs/doc_examples/a28111cdd9b5aaea96c779cbfbf38780.asciidoc new file mode 100644 index 000000000..9a5a812a7 --- /dev/null +++ b/docs/doc_examples/a28111cdd9b5aaea96c779cbfbf38780.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "czech_example", + settings: { + analysis: { + filter: { + czech_stop: { + type: "stop", + stopwords: "_czech_", + }, + czech_keywords: { + type: "keyword_marker", + keywords: ["příklad"], + }, + czech_stemmer: { + type: "stemmer", + language: "czech", + }, + }, + analyzer: { + rebuilt_czech: { + tokenizer: "standard", + filter: [ + "lowercase", + "czech_stop", + "czech_keywords", + "czech_stemmer", + 
], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a2a25aad1fea9a541b52ac613c78fb64.asciidoc b/docs/doc_examples/a2a25aad1fea9a541b52ac613c78fb64.asciidoc deleted file mode 100644 index af5b9dea4..000000000 --- a/docs/doc_examples/a2a25aad1fea9a541b52ac613c78fb64.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'content', - 'name^5' - ], - query: 'this AND that OR thus', - tie_breaker: 0 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a2abd6b6b6b6df7c574a557b5468b5e1.asciidoc b/docs/doc_examples/a2abd6b6b6b6df7c574a557b5468b5e1.asciidoc new file mode 100644 index 000000000..721f3efb9 --- /dev/null +++ b/docs/doc_examples/a2abd6b6b6b6df7c574a557b5468b5e1.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index2", + mappings: { + properties: { + comment: { + type: "text", + analyzer: "standard", + term_vector: "with_positions_offsets", + fields: { + english: { + type: "text", + analyzer: "english", + term_vector: "with_positions_offsets", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a2b2ce031120dac49b5120b26eea8758.asciidoc b/docs/doc_examples/a2b2ce031120dac49b5120b26eea8758.asciidoc new file mode 100644 index 000000000..ce99834e7 --- /dev/null +++ b/docs/doc_examples/a2b2ce031120dac49b5120b26eea8758.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + index: "my-index-*", + v: "true", + s: "index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a2bab367f0e598ae27a2f4ec82e778e9.asciidoc b/docs/doc_examples/a2bab367f0e598ae27a2f4ec82e778e9.asciidoc new file mode 100644 index 000000000..c88eacc13 --- /dev/null +++ b/docs/doc_examples/a2bab367f0e598ae27a2f4ec82e778e9.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.putJob({ + id: "sensor", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "0 0 * * * *", + page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "60m", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a2bd0782aadfd0a902d7f590ee7f49fe.asciidoc b/docs/doc_examples/a2bd0782aadfd0a902d7f590ee7f49fe.asciidoc new file mode 100644 index 000000000..1432ddcbb --- /dev/null +++ b/docs/doc_examples/a2bd0782aadfd0a902d7f590ee7f49fe.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + content_embedding: { + type: "sparse_vector", + }, + content: { + type: "text", + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc b/docs/doc_examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc new file mode 100644 index 000000000..98b41d8fb --- /dev/null +++ b/docs/doc_examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.upgradeTransforms(); +console.log(response); +---- diff --git a/docs/doc_examples/a2dabdcbb661e7690166ae6d0de27e46.asciidoc b/docs/doc_examples/a2dabdcbb661e7690166ae6d0de27e46.asciidoc new file mode 100644 index 000000000..5aa8a5c4d --- /dev/null +++ b/docs/doc_examples/a2dabdcbb661e7690166ae6d0de27e46.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + index: "trips", + fields: "route_*,transit_mode", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a322c8c73d6f2f5e1e375588ed20b636.asciidoc b/docs/doc_examples/a322c8c73d6f2f5e1e375588ed20b636.asciidoc new file mode 100644 index 000000000..58862e58f --- /dev/null +++ b/docs/doc_examples/a322c8c73d6f2f5e1e375588ed20b636.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-search", + indices: [ + { + names: ["target-indices"], + privileges: ["read", "read_cross_cluster"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a325f31e94fb1e8739258910593504a8.asciidoc b/docs/doc_examples/a325f31e94fb1e8739258910593504a8.asciidoc new file mode 100644 index 000000000..47b557307 --- /dev/null +++ b/docs/doc_examples/a325f31e94fb1e8739258910593504a8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "facilitator-role", + cluster: ["manage_oidc", "manage_token"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3464bd6f0a61623562162859566b078.asciidoc b/docs/doc_examples/a3464bd6f0a61623562162859566b078.asciidoc new file mode 100644 index 000000000..088a60324 --- /dev/null +++ b/docs/doc_examples/a3464bd6f0a61623562162859566b078.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.follow({ + index: "kibana_sample_data_ecommerce2", + wait_for_active_shards: 1, + remote_cluster: "clusterA", + leader_index: "kibana_sample_data_ecommerce", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a34d70d7022eb4ba48909d440c80390f.asciidoc b/docs/doc_examples/a34d70d7022eb4ba48909d440c80390f.asciidoc new file mode 100644 index 000000000..6202751e6 --- /dev/null +++ b/docs/doc_examples/a34d70d7022eb4ba48909d440c80390f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "<logstash-{now/d-2d}>,<logstash-{now/d-1d}>,<logstash-{now/d}>", + query: { + match: { + test: "data", + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc b/docs/doc_examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc new file mode 100644 index 000000000..fff9d8917 --- /dev/null +++ b/docs/doc_examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc b/docs/doc_examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc new file mode 100644 index 000000000..abe4e885c --- /dev/null +++ b/docs/doc_examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "restaurants", + retriever: { + standard: { + query: { + bool: { + should: [ + { + match: { + region: "Austria", + }, + }, + ], + filter: [ + { + term: { + year: "2019", + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3779f21f132787c48681bfb50453592.asciidoc b/docs/doc_examples/a3779f21f132787c48681bfb50453592.asciidoc new file mode 100644 index 000000000..aa591634b --- /dev/null +++ b/docs/doc_examples/a3779f21f132787c48681bfb50453592.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "ip_location", + description: "Add ip geolocation info", + processors: [ + { + ip_location: { + field: "ip", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "ip_location", + document: { + ip: "89.160.20.128", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/a38f29375eabd0103f8d7c00b17bb0ab.asciidoc b/docs/doc_examples/a38f29375eabd0103f8d7c00b17bb0ab.asciidoc new file mode 100644 index 000000000..f3456d0d4 --- /dev/null +++ b/docs/doc_examples/a38f29375eabd0103f8d7c00b17bb0ab.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.health(); +console.log(response); +---- diff --git a/docs/doc_examples/a3a14f7f0e80725f695a901a7e1d579d.asciidoc b/docs/doc_examples/a3a14f7f0e80725f695a901a7e1d579d.asciidoc new file mode 100644 index 000000000..c0eaf8801 --- /dev/null +++ b/docs/doc_examples/a3a14f7f0e80725f695a901a7e1d579d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["trim"], + text: " fox ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc b/docs/doc_examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc new file mode 100644 index 000000000..31dec62e6 --- /dev/null +++ b/docs/doc_examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc @@ -0,0 +1,14 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-api-key", + role_descriptors: {}, + metadata: { + application: "myapp", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc b/docs/doc_examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc new file mode 100644 index 000000000..932cd80ac --- /dev/null +++ b/docs/doc_examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + verbose: "true", + pipeline: { + description: "_description", + processors: [ + { + set: { + field: "field2", + value: "_value2", + }, + }, + { + set: { + field: "field3", + value: "_value3", + }, + }, + ], + }, + docs: [ + { + _index: "index", + _id: "id", + _source: { + foo: "bar", + }, + }, + { + _index: "index", + _id: "id", + _source: { + foo: "rab", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3c8f474b0700711a356682f37e62b39.asciidoc b/docs/doc_examples/a3c8f474b0700711a356682f37e62b39.asciidoc new file mode 100644 index 000000000..b99d50485 --- /dev/null +++ b/docs/doc_examples/a3c8f474b0700711a356682f37e62b39.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "azure-ai-studio-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1536, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc b/docs/doc_examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc new file mode 100644 index 000000000..82ca0c679 --- /dev/null +++ b/docs/doc_examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + group_by: "none", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3cfd350c73a104b99a998c6be931408.asciidoc b/docs/doc_examples/a3cfd350c73a104b99a998c6be931408.asciidoc new file mode 100644 index 000000000..5c3d07b1c --- /dev/null +++ b/docs/doc_examples/a3cfd350c73a104b99a998c6be931408.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "blocks", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3d13833714f9bb918e5e0f62a49bd0e.asciidoc b/docs/doc_examples/a3d13833714f9bb918e5e0f62a49bd0e.asciidoc new file mode 100644 index 000000000..43d3aadd4 --- /dev/null +++ b/docs/doc_examples/a3d13833714f9bb918e5e0f62a49bd0e.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ip_addresses", + size: 0, + aggs: { + ip_ranges: { + ip_range: { + field: "ip", + ranges: [ + { 
+ to: "10.0.0.5", + }, + { + from: "10.0.0.5", + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc b/docs/doc_examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc new file mode 100644 index 000000000..2a294fe3f --- /dev/null +++ b/docs/doc_examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.danglingIndices.importDanglingIndex({ + index_uuid: "", + accept_data_loss: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc b/docs/doc_examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc new file mode 100644 index 000000000..869dc012e --- /dev/null +++ b/docs/doc_examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.existsSource({ + index: "my-index-000001", + id: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3f19f3787cb331f230cdac67ff578e8.asciidoc b/docs/doc_examples/a3f19f3787cb331f230cdac67ff578e8.asciidoc new file mode 100644 index 000000000..457176790 --- /dev/null +++ b/docs/doc_examples/a3f19f3787cb331f230cdac67ff578e8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + significant_terms: { + field: "tags", + execution_hint: "map", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3f3c1f3f31dbd225da5fd14633bc4a0.asciidoc b/docs/doc_examples/a3f3c1f3f31dbd225da5fd14633bc4a0.asciidoc new file mode 100644 index 000000000..3d954b8f4 --- /dev/null +++ b/docs/doc_examples/a3f3c1f3f31dbd225da5fd14633bc4a0.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "users", + id: 0, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3f56fa16c6cc67c2db31a4ba9ca11a7.asciidoc b/docs/doc_examples/a3f56fa16c6cc67c2db31a4ba9ca11a7.asciidoc new file mode 100644 index 000000000..07806845c --- /dev/null +++ b/docs/doc_examples/a3f56fa16c6cc67c2db31a4ba9ca11a7.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.putPolicy({ + name: "networks-policy", + range: { + indices: "networks", + match_field: "range", + enrich_fields: ["name", "department"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a3f66deb467df86edbf66e1dca31da51.asciidoc b/docs/doc_examples/a3f66deb467df86edbf66e1dca31da51.asciidoc new file mode 100644 index 000000000..56afe492c --- /dev/null +++ b/docs/doc_examples/a3f66deb467df86edbf66e1dca31da51.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", + _source: "suggest", + suggest: { + "song-suggest": { + prefix: 
"nir", + completion: { + field: "suggest", + size: 5, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a412fe22a74900c72434391ed75139dc.asciidoc b/docs/doc_examples/a412fe22a74900c72434391ed75139dc.asciidoc new file mode 100644 index 000000000..868cabb01 --- /dev/null +++ b/docs/doc_examples/a412fe22a74900c72434391ed75139dc.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "zoomed-in": { + filter: { + geo_bounding_box: { + location: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + aggregations: { + zoom1: { + geohex_grid: { + field: "location", + precision: 12, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a425fcab60f603504becee7d001f0a4b.asciidoc b/docs/doc_examples/a425fcab60f603504becee7d001f0a4b.asciidoc new file mode 100644 index 000000000..c2c279354 --- /dev/null +++ b/docs/doc_examples/a425fcab60f603504becee7d001f0a4b.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "index_4", + settings: { + "index.priority": 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a428d518162918733d49261ffd65cfc1.asciidoc b/docs/doc_examples/a428d518162918733d49261ffd65cfc1.asciidoc new file mode 100644 index 000000000..8d45447a2 --- /dev/null +++ b/docs/doc_examples/a428d518162918733d49261ffd65cfc1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "custom_unique_example", + settings: { + analysis: { + analyzer: { + standard_truncate: { + tokenizer: "standard", + filter: ["unique"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a42f33e15b0995bb4b6058659bfdea85.asciidoc b/docs/doc_examples/a42f33e15b0995bb4b6058659bfdea85.asciidoc deleted file mode 100644 index 34bd3c81a..000000000 --- a/docs/doc_examples/a42f33e15b0995bb4b6058659bfdea85.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - query: { - match_all: {} - }, - boost: '5', - random_score: {}, - boost_mode: 'multiply' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a43954d055f042d625a905513821f5f0.asciidoc b/docs/doc_examples/a43954d055f042d625a905513821f5f0.asciidoc new file mode 100644 index 000000000..0ecb3df45 --- /dev/null +++ b/docs/doc_examples/a43954d055f042d625a905513821f5f0.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + knn_field: "image-vector", + query_vector: [-5, 9, -12], + k: 10, + num_candidates: 100, + fields: ["title", "file-type"], + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/a45244aa3adbf3c793fede100786d1f5.asciidoc b/docs/doc_examples/a45244aa3adbf3c793fede100786d1f5.asciidoc new file mode 100644 index 000000000..7d7c8a339 --- /dev/null +++ b/docs/doc_examples/a45244aa3adbf3c793fede100786d1f5.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + auto_date_histogram: { + field: "date", + buckets: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a45605347d6438e7aecdf3b37198616d.asciidoc b/docs/doc_examples/a45605347d6438e7aecdf3b37198616d.asciidoc new file mode 100644 index 000000000..802f482f4 --- /dev/null +++ b/docs/doc_examples/a45605347d6438e7aecdf3b37198616d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.moveToStep({ + index: "my-index-000001", + current_step: { + phase: "new", + action: "complete", + name: "complete", + }, + next_step: { + phase: "warm", + action: "forcemerge", + name: "forcemerge", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a45810722dc4f468f81b1e8a451d21be.asciidoc b/docs/doc_examples/a45810722dc4f468f81b1e8a451d21be.asciidoc new file mode 100644 index 000000000..faefa6933 --- /dev/null +++ b/docs/doc_examples/a45810722dc4f468f81b1e8a451d21be.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.http.HttpTracer": "TRACE", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a45d80a3fdba70c1b1ba493e51652c8a.asciidoc b/docs/doc_examples/a45d80a3fdba70c1b1ba493e51652c8a.asciidoc new file mode 100644 index 000000000..4ef252dfd --- /dev/null +++ b/docs/doc_examples/a45d80a3fdba70c1b1ba493e51652c8a.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "multipoint", + coordinates: [ + [1002, 1002], + [1003, 2000], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a45eb0cdd138d9c894ca2de9352549a1.asciidoc b/docs/doc_examples/a45eb0cdd138d9c894ca2de9352549a1.asciidoc new file mode 100644 index 000000000..61cfb9a95 --- /dev/null +++ b/docs/doc_examples/a45eb0cdd138d9c894ca2de9352549a1.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_error_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, + input: { + search: { + request: { + indices: ["logs"], + body: { + query: { + match: { + message: "error", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc b/docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc new file mode 100644 index 000000000..f11302fa4 --- /dev/null +++ b/docs/doc_examples/a46f566ca031375658c22f89b87dc6d2.asciidoc @@ -0,0 +1,12 @@ 
+// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + index: ".ml-anomalies-custom-example", + v: "true", + h: "index,store.size", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a49169b4622918992411fab4ec48191b.asciidoc b/docs/doc_examples/a49169b4622918992411fab4ec48191b.asciidoc deleted file mode 100644 index d47b04c96..000000000 --- a/docs/doc_examples/a49169b4622918992411fab4ec48191b.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - genres: { - terms: { - field: 'genre', - script: { - source: "'Genre: ' +_value", - lang: 'painless' - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a49acb27f56fe799a9b1342f85cba0f3.asciidoc b/docs/doc_examples/a49acb27f56fe799a9b1342f85cba0f3.asciidoc new file mode 100644 index 000000000..cda76bd30 --- /dev/null +++ b/docs/doc_examples/a49acb27f56fe799a9b1342f85cba0f3.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + filter: ["word_delimiter_graph"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc b/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc new file mode 100644 index 000000000..b03688330 --- /dev/null +++ b/docs/doc_examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.get({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4bae4d956bc0a663f42cfec36bf8e0b.asciidoc b/docs/doc_examples/a4bae4d956bc0a663f42cfec36bf8e0b.asciidoc new file mode 100644 index 000000000..3c2b0c7bf --- /dev/null +++ b/docs/doc_examples/a4bae4d956bc0a663f42cfec36bf8e0b.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + mappings: { + properties: { + price_range: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "index", + id: 1, + document: { + designation: "spoon", + price: 13, + price_range: "10-100", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a4bd9bf52b4f098838d12bcb8dfc3482.asciidoc b/docs/doc_examples/a4bd9bf52b4f098838d12bcb8dfc3482.asciidoc new file mode 100644 index 000000000..c58479347 --- /dev/null +++ b/docs/doc_examples/a4bd9bf52b4f098838d12bcb8dfc3482.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + 
calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + min_monthly_sales: { + min_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4dbd52004f3ab1580eb73997f77dcab.asciidoc b/docs/doc_examples/a4dbd52004f3ab1580eb73997f77dcab.asciidoc new file mode 100644 index 000000000..b7eab397b --- /dev/null +++ b/docs/doc_examples/a4dbd52004f3ab1580eb73997f77dcab.asciidoc @@ -0,0 +1,62 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "ecommerce-customer-transform", + source: { + index: ["kibana_sample_data_ecommerce"], + query: { + bool: { + filter: { + term: { + currency: "EUR", + }, + }, + }, + }, + }, + pivot: { + group_by: { + customer_id: { + terms: { + field: "customer_id", + }, + }, + }, + aggregations: { + "total_quantity.sum": { + sum: { + field: "total_quantity", + }, + }, + "taxless_total_price.sum": { + sum: { + field: "taxless_total_price", + }, + }, + "total_quantity.max": { + max: { + field: "total_quantity", + }, + }, + "order_id.cardinality": { + cardinality: { + field: "order_id", + }, + }, + }, + }, + dest: { + index: "ecommerce-customers", + }, + retention_policy: { + time: { + field: "order_date", + max_age: "60d", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4e510aa9145ccedae151c4a6634f0a4.asciidoc b/docs/doc_examples/a4e510aa9145ccedae151c4a6634f0a4.asciidoc new file mode 100644 index 000000000..e51f6e91e --- /dev/null +++ b/docs/doc_examples/a4e510aa9145ccedae151c4a6634f0a4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["stemmer"], + text: "the foxes jumping quickly", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc b/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc new file mode 100644 index 000000000..1b51b6936 --- /dev/null +++ b/docs/doc_examples/a4ee2214d621bcfaf768c46d21325958.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "hugging_face_embeddings", + inference_config: { + service: "hugging_face", + service_settings: { + api_key: "<access_token>", + url: "<url_endpoint>", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a4f259522b4dc10a0323aff58236c2c2.asciidoc b/docs/doc_examples/a4f259522b4dc10a0323aff58236c2c2.asciidoc new file mode 100644 index 000000000..bcfc419b0 --- /dev/null +++ b/docs/doc_examples/a4f259522b4dc10a0323aff58236c2c2.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "music", + id: 1, + refresh: "true", + document: { + suggest: { + input: ["Nevermind", "Nirvana"], + weight: 34, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a512e4dd8880ce0395937db1bab1d205.asciidoc b/docs/doc_examples/a512e4dd8880ce0395937db1bab1d205.asciidoc new file mode 
100644 index 000000000..40db80a05 --- /dev/null +++ b/docs/doc_examples/a512e4dd8880ce0395937db1bab1d205.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "edge_ngram", + text: "Quick Fox", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a520168c1c8b454a8f102d6a13027c73.asciidoc b/docs/doc_examples/a520168c1c8b454a8f102d6a13027c73.asciidoc new file mode 100644 index 000000000..b99501640 --- /dev/null +++ b/docs/doc_examples/a520168c1c8b454a8f102d6a13027c73.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.followInfo({ + index: "follower_index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5217a93efabceee9be19949e484f930.asciidoc b/docs/doc_examples/a5217a93efabceee9be19949e484f930.asciidoc new file mode 100644 index 000000000..8566229cb --- /dev/null +++ b/docs/doc_examples/a5217a93efabceee9be19949e484f930.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "music", + id: 1, + refresh: "true", + document: { + suggest: [ + { + input: "Nevermind", + weight: 10, + }, + { + input: "Nirvana", + weight: 3, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a547bb926c25f670078b98fbe67de3cc.asciidoc b/docs/doc_examples/a547bb926c25f670078b98fbe67de3cc.asciidoc new file mode 100644 index 000000000..0a9165a6e --- /dev/null +++ b/docs/doc_examples/a547bb926c25f670078b98fbe67de3cc.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.deleteSynonymRule({ + set_id: "my-synonyms-set", + rule_id: "test-1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a56c20a733a350673d41829c8daaafbe.asciidoc b/docs/doc_examples/a56c20a733a350673d41829c8daaafbe.asciidoc new file mode 100644 index 000000000..c577e3576 --- /dev/null +++ b/docs/doc_examples/a56c20a733a350673d41829c8daaafbe.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "my_autoscaling_policy", + policy: { + roles: ["data_hot"], + deciders: { + fixed: { + storage: "1tb", + memory: "32gb", + processors: 2.3, + nodes: 8, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc b/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc new file mode 100644 index 000000000..08570d5c6 --- /dev/null +++ b/docs/doc_examples/a594f05459d9eecc8050c73fc8da336f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "azure_openai_embeddings", + inference_config: { + service: "azureopenai", + service_settings: { + api_key: "<api_key>", + resource_name: "<resource_name>", + deployment_id: 
"", + api_version: "2024-02-01", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5a58e8ad66afe831bc295500e3e8739.asciidoc b/docs/doc_examples/a5a58e8ad66afe831bc295500e3e8739.asciidoc new file mode 100644 index 000000000..8a9680f82 --- /dev/null +++ b/docs/doc_examples/a5a58e8ad66afe831bc295500e3e8739.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + unfollow: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5a5fb129de2f492e8fd33043a73439c.asciidoc b/docs/doc_examples/a5a5fb129de2f492e8fd33043a73439c.asciidoc new file mode 100644 index 000000000..74eaa913d --- /dev/null +++ b/docs/doc_examples/a5a5fb129de2f492e8fd33043a73439c.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "dictionary_decompound_example", + settings: { + analysis: { + analyzer: { + standard_dictionary_decompound: { + tokenizer: "standard", + filter: ["22_char_dictionary_decompound"], + }, + }, + filter: { + "22_char_dictionary_decompound": { + type: "dictionary_decompounder", + word_list_path: "analysis/example_word_list.txt", + max_subword_size: 22, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5a7050fb9dcb9574e081957ade28617.asciidoc b/docs/doc_examples/a5a7050fb9dcb9574e081957ade28617.asciidoc deleted file mode 100644 index b9cb57e59..000000000 --- a/docs/doc_examples/a5a7050fb9dcb9574e081957ade28617.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.deleteByQuery({ - index: 'twitter', - refresh: true, - slices: '5', - body: { - query: { - range: { - likes: { - lt: 10 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc b/docs/doc_examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc new file mode 100644 index 000000000..43cce7452 --- /dev/null +++ b/docs/doc_examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + date: { + type: "date_nanos", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + date: ["2015-01-01T12:10:30.000Z", "2014-01-01T12:10:30.000Z"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a5b59f0170a2feaa39e40243fd7ae359.asciidoc b/docs/doc_examples/a5b59f0170a2feaa39e40243fd7ae359.asciidoc new file mode 100644 index 000000000..5ed800291 --- /dev/null +++ b/docs/doc_examples/a5b59f0170a2feaa39e40243fd7ae359.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.searchApplication.put({ + name: "my-example-app", + search_application: { + indices: ["my-example-app"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "bool": {\n "must": [\n {{#query}}\n {\n "query_string": {\n "query": "{{query}}",\n "search_fields": {{#toJson}}search_fields{{/toJson}}\n }\n }\n {{/query}}\n ]\n }\n }\n }\n ', + params: { + query: "", + search_fields: "", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc b/docs/doc_examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc new file mode 100644 index 000000000..c2645790d --- /dev/null +++ b/docs/doc_examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlPrepareAuthentication({ + realm: "saml1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5e2b3588258430f2e595abda98e3943.asciidoc b/docs/doc_examples/a5e2b3588258430f2e595abda98e3943.asciidoc new file mode 100644 index 000000000..5dc12ff3d --- /dev/null +++ b/docs/doc_examples/a5e2b3588258430f2e595abda98e3943.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRealms({ + realms: "default_file", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc b/docs/doc_examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc new file mode 100644 index 000000000..312d22490 --- /dev/null +++ b/docs/doc_examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + percolate: { + field: "query", + documents: [ + { + message: "Japanse art", + }, + { + message: "Holand culture", + }, + { + message: "Japanese art and Holand culture", + }, + { + message: "no-match", + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5e6ccfb6019238e6db602373b9af147.asciidoc b/docs/doc_examples/a5e6ccfb6019238e6db602373b9af147.asciidoc new file mode 100644 index 000000000..8ef447ce8 --- /dev/null +++ b/docs/doc_examples/a5e6ccfb6019238e6db602373b9af147.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5e793d82a4455cf4105dac82a156617.asciidoc b/docs/doc_examples/a5e793d82a4455cf4105dac82a156617.asciidoc new file mode 100644 index 000000000..5ffed26d1 --- /dev/null +++ b/docs/doc_examples/a5e793d82a4455cf4105dac82a156617.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + rewrite: "true", + query: { + more_like_this: { + like: { + _id: "2", + }, + boost_terms: 1, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/a5ebcd70c34d1ece77a4fb27cc050917.asciidoc b/docs/doc_examples/a5ebcd70c34d1ece77a4fb27cc050917.asciidoc new file mode 100644 index 000000000..634516cec --- /dev/null +++ b/docs/doc_examples/a5ebcd70c34d1ece77a4fb27cc050917.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_ranks: { + percentile_ranks: { + field: "load_time", + values: [500, 600], + keyed: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a5f9eb40087921e67d820775acf71522.asciidoc b/docs/doc_examples/a5f9eb40087921e67d820775acf71522.asciidoc new file mode 100644 index 000000000..041de0d6c --- /dev/null +++ b/docs/doc_examples/a5f9eb40087921e67d820775acf71522.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + city: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc b/docs/doc_examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc new file mode 100644 index 000000000..73da91b2d --- /dev/null +++ b/docs/doc_examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.cancelMigrateReindex({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6169bc057ce8654bd306ff4b062081b.asciidoc b/docs/doc_examples/a6169bc057ce8654bd306ff4b062081b.asciidoc new file mode 100644 index 000000000..da8f00888 --- /dev/null +++ b/docs/doc_examples/a6169bc057ce8654bd306ff4b062081b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", + pretty: "true", + suggest: { + "song-suggest": { + prefix: "nor", + completion: { + field: "suggest", + skip_duplicates: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc b/docs/doc_examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc new file mode 100644 index 000000000..5fafb3e42 --- /dev/null +++ b/docs/doc_examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.openJob({ + job_id: "low_request_rate", + timeout: "35m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a62833baf15f2c9ac094a9289e56a012.asciidoc b/docs/doc_examples/a62833baf15f2c9ac094a9289e56a012.asciidoc new file mode 100644 index 000000000..38c0b3ec4 --- /dev/null +++ b/docs/doc_examples/a62833baf15f2c9ac094a9289e56a012.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "timeseries", + document: { + message: "logged the request", + 
"@timestamp": "1591890611", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a63e0d0504e0c9313814b7f4e2641353.asciidoc b/docs/doc_examples/a63e0d0504e0c9313814b7f4e2641353.asciidoc new file mode 100644 index 000000000..be5a9a106 --- /dev/null +++ b/docs/doc_examples/a63e0d0504e0c9313814b7f4e2641353.asciidoc @@ -0,0 +1,82 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + network: { + properties: { + name: { + type: "keyword", + }, + }, + }, + latency_histo: { + type: "histogram", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [1, 3, 8, 12, 15], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [1, 6, 8, 12, 14], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + filter_path: "aggregations", + aggs: { + latency_ranges: { + range: { + field: "latency_histo", + ranges: [ + { + to: 2, + }, + { + from: 2, + to: 3, + }, + { + from: 3, + to: 10, + }, + { + from: 10, + }, + ], + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/a669e9d56e34c95ef4c780e92ed307f1.asciidoc b/docs/doc_examples/a669e9d56e34c95ef4c780e92ed307f1.asciidoc new file mode 100644 index 000000000..2b4f275e8 --- /dev/null +++ b/docs/doc_examples/a669e9d56e34c95ef4c780e92ed307f1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.get({ + id: "FjlmbndxNmJjU0RPdExBTGg0elNOOEEaQk9xSjJBQzBRMldZa1VVQ2pPa01YUToxMDY=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc b/docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc new file mode 100644 index 000000000..2837854a9 --- /dev/null +++ b/docs/doc_examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: ".migrated-ds-my-data-stream-2025.01.23-000001", + human: "true", + filter_path: "*.settings.index.version.created_string", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a692b4c0ca7825c467880b346841f5a5.asciidoc b/docs/doc_examples/a692b4c0ca7825c467880b346841f5a5.asciidoc new file mode 100644 index 000000000..04148381f --- /dev/null +++ b/docs/doc_examples/a692b4c0ca7825c467880b346841f5a5.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + name: { + properties: { + first: { + type: "text", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a699189c8d1a7573beeaea768f2fc618.asciidoc 
b/docs/doc_examples/a699189c8d1a7573beeaea768f2fc618.asciidoc new file mode 100644 index 000000000..c1836d6f8 --- /dev/null +++ b/docs/doc_examples/a699189c8d1a7573beeaea768f2fc618.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "snapshot-20200617", + indices: "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", + include_aliases: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc b/docs/doc_examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc new file mode 100644 index 000000000..44f85408c --- /dev/null +++ b/docs/doc_examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.clearScroll({ + scroll_id: "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6b2815d54df34b6b8d00226e9a1af0c.asciidoc b/docs/doc_examples/a6b2815d54df34b6b8d00226e9a1af0c.asciidoc new file mode 100644 index 000000000..3781991c0 --- /dev/null +++ b/docs/doc_examples/a6b2815d54df34b6b8d00226e9a1af0c.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + "my_embeddings.predicted_value": { + type: "dense_vector", + dims: 384, + }, + my_text_field: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6bb306ca250cf651f19cae808b97012.asciidoc b/docs/doc_examples/a6bb306ca250cf651f19cae808b97012.asciidoc new file mode 100644 index 000000000..ab06f18be --- /dev/null +++ b/docs/doc_examples/a6bb306ca250cf651f19cae808b97012.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.get({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc b/docs/doc_examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc new file mode 100644 index 000000000..c088b4ada --- /dev/null +++ b/docs/doc_examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.existsTemplate({ + name: "template_1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc b/docs/doc_examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc new file mode 100644 index 000000000..830812cdf --- /dev/null +++ b/docs/doc_examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getDataStream({ + name: "dsl-data-stream", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/a6ef8cd8c8218d547727ffc5485bfbd7.asciidoc b/docs/doc_examples/a6ef8cd8c8218d547727ffc5485bfbd7.asciidoc new file mode 100644 index 000000000..5be7e17bc --- /dev/null +++ b/docs/doc_examples/a6ef8cd8c8218d547727ffc5485bfbd7.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + range: { + date_range: { + field: "date", + missing: "1976/11/30", + ranges: [ + { + key: "Older", + to: "2016/02/01", + }, + { + key: "Newer", + from: "2016/02/01", + to: "now/d", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc b/docs/doc_examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc new file mode 100644 index 000000000..c37e742e2 --- /dev/null +++ b/docs/doc_examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a71154ea11a5214f409ecfd118e9b5e3.asciidoc b/docs/doc_examples/a71154ea11a5214f409ecfd118e9b5e3.asciidoc new file mode 100644 index 000000000..176ffa5d0 --- /dev/null +++ b/docs/doc_examples/a71154ea11a5214f409ecfd118e9b5e3.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlInvalidate({ + query: + "SAMLRequest=nZFda4MwFIb%2FiuS%2BmviRpqFaClKQdbvo2g12M2KMraCJ9cRR9utnW4Wyi13sMie873MeznJ1aWrnS3VQGR0j4mLkKC1NUeljjA77zYyhVbIE0dR%2By7fmaHq7U%2BdegXWGpAZ%2B%2F4pR32luBFTAtWgUcCv56%2Fp5y30X87Yz1khTIycdgpUW9kY7WdsC9zxoXTvMvWuVV98YyMnSGH2SYE5pwALBIr9QKiwDGpW0oGVUznGeMyJZKFkQ4jBf5HnhUymjIhzCAL3KNFihbYx8TBYzzGaY7EnIyZwHzCWMfiDnbRIftkSjJr%2BFu0e9v%2B0EgOquRiiZjKpiVFp6j50T4WXoyNJ%2FEWC9fdqc1t%2F1%2B2F3aUpjzhPiXpqMz1%2FHSn4A&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=MsAYz2NFdovMG2mXf6TSpu5vlQQyEJAg%2B4KCwBqJTmrb3yGXKUtIgvjqf88eCAK32v3eN8vupjPC8LglYmke1ZnjK0%2FKxzkvSjTVA7mMQe2AQdKbkyC038zzRq%2FYHcjFDE%2Bz0qISwSHZY2NyLePmwU7SexEXnIz37jKC6NMEhus%3D", + realm: "saml1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a72613de3774571ba24def4b495161b5.asciidoc b/docs/doc_examples/a72613de3774571ba24def4b495161b5.asciidoc new file mode 100644 index 000000000..453cf9649 --- /dev/null +++ b/docs/doc_examples/a72613de3774571ba24def4b495161b5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + user_id: { + type: "alias", + path: "user_identifier", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a735081e715d385b4d471eea0f2b57da.asciidoc b/docs/doc_examples/a735081e715d385b4d471eea0f2b57da.asciidoc new file mode 100644 index 000000000..7f1c054ed --- /dev/null +++ b/docs/doc_examples/a735081e715d385b4d471eea0f2b57da.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] 
+---- +const response = await client.cluster.putSettings({ + persistent: { + "slm.retention_schedule": "0 30 1 * * ?", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a73a9a6f19516b8ead63182a9ae5b540.asciidoc b/docs/doc_examples/a73a9a6f19516b8ead63182a9ae5b540.asciidoc new file mode 100644 index 000000000..7ffc8de28 --- /dev/null +++ b/docs/doc_examples/a73a9a6f19516b8ead63182a9ae5b540.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "MULTILINESTRING ((1002.0 200.0, 1003.0 200.0, 1003.0 300.0, 1002.0 300.0), (1000.0 100.0, 1001.0 100.0, 1001.0 100.0, 1000.0 100.0), (1000.2 0.2, 1000.8 100.2, 1000.8 100.8, 1000.2 100.8))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc b/docs/doc_examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc new file mode 100644 index 000000000..7faea78a5 --- /dev/null +++ b/docs/doc_examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_connector/_sync_job/my-connector-sync-job-id/_claim", + body: { + worker_hostname: "some-machine", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a769d696bf12f5e9de4b3250646d250c.asciidoc b/docs/doc_examples/a769d696bf12f5e9de4b3250646d250c.asciidoc new file mode 100644 index 000000000..6bce3ee50 --- /dev/null +++ b/docs/doc_examples/a769d696bf12f5e9de4b3250646d250c.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "alibabacloud-ai-search-embeddings", + pipeline: "alibabacloud_ai_search_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc b/docs/doc_examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc new file mode 100644 index 000000000..998774ae8 --- /dev/null +++ b/docs/doc_examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: "ctx._source['my-object'].remove('my-subfield')", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a799477dff04578b200788a63f9cff71.asciidoc b/docs/doc_examples/a799477dff04578b200788a63f9cff71.asciidoc new file mode 100644 index 000000000..23f5982b1 --- /dev/null +++ b/docs/doc_examples/a799477dff04578b200788a63f9cff71.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "ip_addresses", + size: 0, + aggs: { + ip_ranges: { + ip_range: { + field: "ip", + ranges: [ + { + key: "infinity", + to: "10.0.0.5", + }, + { + key: "and-beyond", + from: "10.0.0.5", + }, + ], + keyed: true, + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/a7c15fe6b5779c84ce9a34bf4b2a7ab7.asciidoc b/docs/doc_examples/a7c15fe6b5779c84ce9a34bf4b2a7ab7.asciidoc deleted file mode 100644 index fb7990dd8..000000000 --- a/docs/doc_examples/a7c15fe6b5779c84ce9a34bf4b2a7ab7.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'my_index', - body: { - properties: { - my_field: { - type: 'text', - fielddata: true - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc b/docs/doc_examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc new file mode 100644 index 000000000..6fbda91b6 --- /dev/null +++ b/docs/doc_examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deletePipeline({ + id: "pipeline-one", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a7d814caf2a995d2aeadecc3495011be.asciidoc b/docs/doc_examples/a7d814caf2a995d2aeadecc3495011be.asciidoc new file mode 100644 index 000000000..782a1c3cd --- /dev/null +++ b/docs/doc_examples/a7d814caf2a995d2aeadecc3495011be.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + bool: { + type: "boolean", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + bool: [true, false, true, false], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a7e58d4dc477a84c1306fd5749aafd8b.asciidoc b/docs/doc_examples/a7e58d4dc477a84c1306fd5749aafd8b.asciidoc new file mode 100644 index 000000000..b7b0aec7a --- /dev/null +++ b/docs/doc_examples/a7e58d4dc477a84c1306fd5749aafd8b.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + age: { + type: "integer", + }, + email: { + type: "keyword", + }, + name: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a7fb1c0d0827d66bfa66016f2564b10c.asciidoc b/docs/doc_examples/a7fb1c0d0827d66bfa66016f2564b10c.asciidoc new file mode 100644 index 000000000..7a38525fe --- /dev/null +++ b/docs/doc_examples/a7fb1c0d0827d66bfa66016f2564b10c.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n process where process.name == "regsvr32.exe" and process.command_line.keyword != null\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/a8019280dab5b04211ae3b21e5e08223.asciidoc b/docs/doc_examples/a8019280dab5b04211ae3b21e5e08223.asciidoc new file mode 100644 index 000000000..8c7d9b215 --- /dev/null +++ 
b/docs/doc_examples/a8019280dab5b04211ae3b21e5e08223.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_fs_backup", + repository: { + type: "fs", + settings: { + location: "My_fs_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a80f5db4357bb25b8704d374c18318ed.asciidoc b/docs/doc_examples/a80f5db4357bb25b8704d374c18318ed.asciidoc deleted file mode 100644 index 01c0eda5a..000000000 --- a/docs/doc_examples/a80f5db4357bb25b8704d374c18318ed.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - pretty: true, - body: { - query: { - match: { - full_text: 'Quick Brown Foxes!' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/a810da963d3b28d79dcd17be829bb271.asciidoc b/docs/doc_examples/a810da963d3b28d79dcd17be829bb271.asciidoc new file mode 100644 index 000000000..d242bfba4 --- /dev/null +++ b/docs/doc_examples/a810da963d3b28d79dcd17be829bb271.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + "user.id": "kimchy", + }, + }, + docvalue_fields: [ + "user.id", + "http.response.*", + { + field: "date", + format: "epoch_millis", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a811b82ba4632bdd9065829085188bc9.asciidoc b/docs/doc_examples/a811b82ba4632bdd9065829085188bc9.asciidoc new file mode 100644 index 000000000..175e7cf13 --- /dev/null +++ b/docs/doc_examples/a811b82ba4632bdd9065829085188bc9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "my_snapshot", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a84bc239eb2f607e8bed1fdb70d63823.asciidoc b/docs/doc_examples/a84bc239eb2f607e8bed1fdb70d63823.asciidoc new file mode 100644 index 000000000..6074505da --- /dev/null +++ b/docs/doc_examples/a84bc239eb2f607e8bed1fdb70d63823.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "my_autoscaling_policy", + policy: { + roles: ["data_hot"], + deciders: { + proactive_storage: { + forecast_window: "10m", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a861a89f52008610e813b9f073951c58.asciidoc b/docs/doc_examples/a861a89f52008610e813b9f073951c58.asciidoc new file mode 100644 index 000000000..8199b57eb --- /dev/null +++ b/docs/doc_examples/a861a89f52008610e813b9f073951c58.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + metric: "merge,refresh", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/a89052bcdfe40e604a98d12be6ae59d2.asciidoc b/docs/doc_examples/a89052bcdfe40e604a98d12be6ae59d2.asciidoc new file mode 100644 index 000000000..a7161f96f --- /dev/null +++ b/docs/doc_examples/a89052bcdfe40e604a98d12be6ae59d2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "BBOX (100.0, 102.0, 2.0, 0.0)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a8add749c3f41ad1308a45308df14103.asciidoc b/docs/doc_examples/a8add749c3f41ad1308a45308df14103.asciidoc new file mode 100644 index 000000000..4246c611a --- /dev/null +++ b/docs/doc_examples/a8add749c3f41ad1308a45308df14103.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "sales", + id: 1, + refresh: "true", + document: { + tags: ["car", "auto"], + comments: [ + { + username: "baddriver007", + comment: "This car could have better brakes", + }, + { + username: "dr_who", + comment: "Where's the autopilot? Can't find it", + }, + { + username: "ilovemotorbikes", + comment: "This car has two extra wheels", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc b/docs/doc_examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc new file mode 100644 index 000000000..e9c4ba6ea --- /dev/null +++ b/docs/doc_examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + my_range: { + type: "integer_range", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + my_range: { + lte: 2147483647, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a9280b55a7284952f604ec7bece712f6.asciidoc b/docs/doc_examples/a9280b55a7284952f604ec7bece712f6.asciidoc new file mode 100644 index 000000000..9dde4fa2f --- /dev/null +++ b/docs/doc_examples/a9280b55a7284952f604ec7bece712f6.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + range: { + voltage_corrected: { + gte: 16, + lte: 20, + boost: 1, + }, + }, + }, + fields: ["voltage_corrected", "node"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a941fd568f2e20e13df909ab24506073.asciidoc b/docs/doc_examples/a941fd568f2e20e13df909ab24506073.asciidoc new file mode 100644 index 000000000..9803d3ef9 --- /dev/null +++ b/docs/doc_examples/a941fd568f2e20e13df909ab24506073.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings(); +console.log(response); + +const response1 = await client.cluster.putSettings({ + persistent: { + 
"xpack.monitoring.collection.enabled": false, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/a9541c64512ebc5fcff2dc48487dc0b7.asciidoc b/docs/doc_examples/a9541c64512ebc5fcff2dc48487dc0b7.asciidoc new file mode 100644 index 000000000..52fedcb9b --- /dev/null +++ b/docs/doc_examples/a9541c64512ebc5fcff2dc48487dc0b7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + format: "txt", + query: + "FROM library | KEEP author, name, page_count, release_date | SORT page_count DESC | LIMIT 5", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9554396506888e392a1aee0ca28e6fc.asciidoc b/docs/doc_examples/a9554396506888e392a1aee0ca28e6fc.asciidoc new file mode 100644 index 000000000..bf07ef824 --- /dev/null +++ b/docs/doc_examples/a9554396506888e392a1aee0ca28e6fc.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "my-index-2099.05.06-000001", + alias: "my-alias", + filter: { + bool: { + filter: [ + { + range: { + "@timestamp": { + gte: "now-1d/d", + lt: "now/d", + }, + }, + }, + { + term: { + "user.id": "kimchy", + }, + }, + ], + }, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a95a123b9f862e52ab1e8f875961c852.asciidoc b/docs/doc_examples/a95a123b9f862e52ab1e8f875961c852.asciidoc new file mode 100644 index 000000000..2a6be34f4 --- /dev/null +++ b/docs/doc_examples/a95a123b9f862e52ab1e8f875961c852.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + indices_boost: [ + { + "my-index-000001": 1.4, + }, + { + "my-index-000002": 1.3, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc b/docs/doc_examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc new file mode 100644 index 000000000..fbff6cd91 --- /dev/null +++ b/docs/doc_examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "azure_openai_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "azure_openai_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc b/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc new file mode 100644 index 000000000..8e7ed5f3d --- /dev/null +++ b/docs/doc_examples/a960b43e720b4934edb74ab4b085ca77.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.list(); +console.log(response); +---- diff --git a/docs/doc_examples/a97aace57c6442bbb90e1e14effbcda3.asciidoc b/docs/doc_examples/a97aace57c6442bbb90e1e14effbcda3.asciidoc new file mode 100644 index 000000000..330a2fead --- /dev/null +++ 
b/docs/doc_examples/a97aace57c6442bbb90e1e14effbcda3.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "csv", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a97f984c01fa1d96e6d33a0e8e2cb90f.asciidoc b/docs/doc_examples/a97f984c01fa1d96e6d33a0e8e2cb90f.asciidoc new file mode 100644 index 000000000..b9768bb41 --- /dev/null +++ b/docs/doc_examples/a97f984c01fa1d96e6d33a0e8e2cb90f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + query: { + type: "percolator", + }, + field: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc b/docs/doc_examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc new file mode 100644 index 000000000..dc93cfbe7 --- /dev/null +++ b/docs/doc_examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + fields: "rating,title", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a98692a565904ec0783884d81a7b71fc.asciidoc b/docs/doc_examples/a98692a565904ec0783884d81a7b71fc.asciidoc new file mode 100644 index 000000000..d8bba8dc5 --- /dev/null +++ b/docs/doc_examples/a98692a565904ec0783884d81a7b71fc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.health({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a999b5661bebb802bbbfe04faacf1971.asciidoc b/docs/doc_examples/a999b5661bebb802bbbfe04faacf1971.asciidoc new file mode 100644 index 000000000..1102705e5 --- /dev/null +++ b/docs/doc_examples/a999b5661bebb802bbbfe04faacf1971.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-2099.10.*", + }, + dest: { + index: "my-index-2099.10", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a99bc141066ef673e35f306157750ec9.asciidoc b/docs/doc_examples/a99bc141066ef673e35f306157750ec9.asciidoc new file mode 100644 index 000000000..1d26fbdef --- /dev/null +++ b/docs/doc_examples/a99bc141066ef673e35f306157750ec9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "lowercase", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a99bf70ae38bdf1c6f350140b25e0422.asciidoc b/docs/doc_examples/a99bf70ae38bdf1c6f350140b25e0422.asciidoc new file mode 100644 index 000000000..42c77b7ae --- /dev/null +++ b/docs/doc_examples/a99bf70ae38bdf1c6f350140b25e0422.asciidoc @@ -0,0 
+1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + routing: "my-routing-value", + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9c08023354aa9b9023807962df71d13.asciidoc b/docs/doc_examples/a9c08023354aa9b9023807962df71d13.asciidoc new file mode 100644 index 000000000..1a765c0b6 --- /dev/null +++ b/docs/doc_examples/a9c08023354aa9b9023807962df71d13.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9d44463dcea3cb0ea4c8f8460cea524.asciidoc b/docs/doc_examples/a9d44463dcea3cb0ea4c8f8460cea524.asciidoc new file mode 100644 index 000000000..c5129e611 --- /dev/null +++ b/docs/doc_examples/a9d44463dcea3cb0ea4c8f8460cea524.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggregations: { + "tiles-in-bounds": { + geohex_grid: { + field: "location", + precision: 12, + bounds: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9dd5cd3f2b31e7c8129ea63bab868b4.asciidoc b/docs/doc_examples/a9dd5cd3f2b31e7c8129ea63bab868b4.asciidoc new file mode 100644 index 000000000..da7520e11 --- /dev/null +++ b/docs/doc_examples/a9dd5cd3f2b31e7c8129ea63bab868b4.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1", "index2"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "bool": {\n "should": [\n {{#elser_fields}}\n {\n "sparse_vector": {\n "field": "ml.inference.{{.}}_expanded.predicted_value",\n "inference_id": "",\n "query": "{{query_string}}"\n }\n },\n {{/elser_fields}}\n ]\n }\n },\n "min_score": "{{min_score}}"\n }\n ', + params: { + query_string: "*", + min_score: "10", + elser_fields: [ + { + name: "title", + }, + { + name: "description", + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9dd9595e96c307b8c798beaeb571521.asciidoc b/docs/doc_examples/a9dd9595e96c307b8c798beaeb571521.asciidoc new file mode 100644 index 000000000..c7a9de6be --- /dev/null +++ b/docs/doc_examples/a9dd9595e96c307b8c798beaeb571521.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.upgradeJobSnapshot({ + job_id: "low_request_rate", + snapshot_id: 1828371, + timeout: "45m", + wait_for_completion: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc b/docs/doc_examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc new file mode 100644 index 000000000..488fb5205 --- /dev/null +++ 
b/docs/doc_examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + match: { + text: "How often does the moon hide the sun?", + }, + }, + }, + }, + field: "text", + inference_id: "my-elastic-rerank", + inference_text: "How often does the moon hide the sun?", + rank_window_size: 100, + min_score: 0.5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc b/docs/doc_examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc new file mode 100644 index 000000000..3f53a8934 --- /dev/null +++ b/docs/doc_examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + number_of_shards: 3, + number_of_replicas: 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa1771b702f4b771491ba4ab743a9197.asciidoc b/docs/doc_examples/aa1771b702f4b771491ba4ab743a9197.asciidoc new file mode 100644 index 000000000..3ef343b39 --- /dev/null +++ b/docs/doc_examples/aa1771b702f4b771491ba4ab743a9197.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + name: "index.number_of_replicas", +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa3284717241ed79d3d1d3bdbbdce598.asciidoc b/docs/doc_examples/aa3284717241ed79d3d1d3bdbbdce598.asciidoc new file mode 100644 index 000000000..7f33df8f8 --- /dev/null +++ b/docs/doc_examples/aa3284717241ed79d3d1d3bdbbdce598.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["lowercase"], + text: "THE Quick FoX JUMPs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa5c0fa51a3553ce7caa763c3832120d.asciidoc b/docs/doc_examples/aa5c0fa51a3553ce7caa763c3832120d.asciidoc new file mode 100644 index 000000000..753eecfad --- /dev/null +++ b/docs/doc_examples/aa5c0fa51a3553ce7caa763c3832120d.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "monthly-snapshots", + name: "", + schedule: "0 56 23 1 * ?", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + }, + retention: { + expire_after: "366d", + min_count: 1, + max_count: 12, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa5fbb68d3a8e0d0c894791cb6cf0b13.asciidoc b/docs/doc_examples/aa5fbb68d3a8e0d0c894791cb6cf0b13.asciidoc new file mode 100644 index 000000000..09182d892 --- /dev/null +++ b/docs/doc_examples/aa5fbb68d3a8e0d0c894791cb6cf0b13.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.indices.create({ + index: "reverse_example", + settings: { + analysis: { + analyzer: { + whitespace_reverse: { + tokenizer: "whitespace", + filter: ["reverse"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc b/docs/doc_examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc new file mode 100644 index 000000000..ee105f278 --- /dev/null +++ b/docs/doc_examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startTrainedModelDeployment({ + model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english", + wait_for: "started", + timeout: "1m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc b/docs/doc_examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc new file mode 100644 index 000000000..a9bcf22e7 --- /dev/null +++ b/docs/doc_examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "alibabacloud_ai_search_rerank", + inference_config: { + service: "alibabacloud-ai-search", + service_settings: { + api_key: "<api_key>", + service_id: "ops-bge-reranker-larger", + host: "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + workspace: "default", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa699ff3234f54d091575a38e859a627.asciidoc b/docs/doc_examples/aa699ff3234f54d091575a38e859a627.asciidoc new file mode 100644 index 000000000..f05fd0730 --- /dev/null +++ b/docs/doc_examples/aa699ff3234f54d091575a38e859a627.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + typed_keys: "true", + aggs: { + "my-agg-name": { + histogram: { + field: "my-field", + interval: 1000, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc b/docs/doc_examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc new file mode 100644 index 000000000..d6cff373f --- /dev/null +++ b/docs/doc_examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "daily-snapshots", + schedule: "0 30 1 * * ?", + name: "<daily-snap-{now/d}>", + repository: "my_repository", + config: { + indices: ["data-*", "important"], + ignore_unavailable: false, + include_global_state: false, + }, + retention: { + expire_after: "30d", + min_count: 5, + max_count: 50, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc b/docs/doc_examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc new file mode 100644 index 000000000..d9523d9b3 --- /dev/null +++ b/docs/doc_examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getIndexTemplate({ + name: "*", + filter_path: + "index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/aa814309ad5f1630886ba75255b444f5.asciidoc b/docs/doc_examples/aa814309ad5f1630886ba75255b444f5.asciidoc new file mode 100644 index 000000000..736d58e62 --- /dev/null +++ b/docs/doc_examples/aa814309ad5f1630886ba75255b444f5.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.pendingTasks(); +console.log(response); +---- diff --git a/docs/doc_examples/aaa7a61b07861235fb6e489b946c705c.asciidoc b/docs/doc_examples/aaa7a61b07861235fb6e489b946c705c.asciidoc new file mode 100644 index 000000000..e391c4895 --- /dev/null +++ b/docs/doc_examples/aaa7a61b07861235fb6e489b946c705c.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + version: 2, + version_type: "external", + document: { + user: { + id: "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc b/docs/doc_examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc new file mode 100644 index 000000000..3d38f131d --- /dev/null +++ b/docs/doc_examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchableSnapshots.cacheStats(); +console.log(response); +---- diff --git a/docs/doc_examples/aab810de3314d5e11bd564ea096785b8.asciidoc b/docs/doc_examples/aab810de3314d5e11bd564ea096785b8.asciidoc new file mode 100644 index 000000000..5ab6d8d6d --- /dev/null +++ b/docs/doc_examples/aab810de3314d5e11bd564ea096785b8.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + bool: { + filter: [ + { + term: { + "category.keyword": "Breakfast", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aaba346e0becdf12db13658296e0b8a1.asciidoc b/docs/doc_examples/aaba346e0becdf12db13658296e0b8a1.asciidoc new file mode 100644 index 000000000..528068982 --- /dev/null +++ b/docs/doc_examples/aaba346e0becdf12db13658296e0b8a1.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.number_of_shards": 2, + "index.lifecycle.name": "shrink-index", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aac5996a8398cc8f7701a063df0b2346.asciidoc b/docs/doc_examples/aac5996a8398cc8f7701a063df0b2346.asciidoc new file mode 100644 index 000000000..f5d21c1c6 --- /dev/null +++ b/docs/doc_examples/aac5996a8398cc8f7701a063df0b2346.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO 
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "saml-finance", + roles: ["finance_data"], + enabled: true, + rules: { + all: [ + { + field: { + "realm.name": "saml1", + }, + }, + { + field: { + groups: "finance-team", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc b/docs/doc_examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc new file mode 100644 index 000000000..bc29bce3b --- /dev/null +++ b/docs/doc_examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + f: { + type: "scaled_float", + scaling_factor: 0.01, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + f: 123, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/aadf36ae37460a735e06b953b4cee494.asciidoc b/docs/doc_examples/aadf36ae37460a735e06b953b4cee494.asciidoc new file mode 100644 index 000000000..3b1270415 --- /dev/null +++ b/docs/doc_examples/aadf36ae37460a735e06b953b4cee494.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + runtime_mappings: { + price_range: { + type: "keyword", + script: { + source: + "\n def bucket_start = (long) Math.floor(doc['taxful_total_price'].value / 50) * 50;\n def bucket_end = bucket_start + 50;\n emit(bucket_start.toString() + \"-\" + bucket_end.toString());\n ", + }, + }, + }, + size: 0, + aggs: { + my_agg: { + frequent_item_sets: { + minimum_set_size: 4, + fields: [ + { + field: "category.keyword", + }, + { + field: "price_range", + }, + { + field: "geoip.city_name", + }, + ], + size: 3, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab0fd1908c9957cc7f63165c156e48cd.asciidoc b/docs/doc_examples/ab0fd1908c9957cc7f63165c156e48cd.asciidoc new file mode 100644 index 000000000..6cedc3adc --- /dev/null +++ b/docs/doc_examples/ab0fd1908c9957cc7f63165c156e48cd.asciidoc @@ -0,0 +1,56 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user_id: { + type: "keyword", + }, + last_updated: { + type: "date", + }, + session_data: { + type: "object", + enabled: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "session_1", + document: { + user_id: "kimchy", + session_data: { + arbitrary_object: { + some_array: [ + "foo", + "bar", + { + baz: 2, + }, + ], + }, + }, + last_updated: "2015-12-06T18:20:22", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: "session_2", + document: { + user_id: "jpountz", + session_data: "none", + last_updated: "2015-12-06T18:22:13", + }, +}); +console.log(response2); +---- diff --git 
a/docs/doc_examples/ab1372270c11bcd6f36d1a13e6c69276.asciidoc b/docs/doc_examples/ab1372270c11bcd6f36d1a13e6c69276.asciidoc new file mode 100644 index 000000000..bf78c663c --- /dev/null +++ b/docs/doc_examples/ab1372270c11bcd6f36d1a13e6c69276.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: + "my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001", + ccs_minimize_roundtrips: "true", + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab1a989958c1d345a9dc3dd36ad90c27.asciidoc b/docs/doc_examples/ab1a989958c1d345a9dc3dd36ad90c27.asciidoc new file mode 100644 index 000000000..08cd07a68 --- /dev/null +++ b/docs/doc_examples/ab1a989958c1d345a9dc3dd36ad90c27.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "POLYGON ((1000.0 1000.0, 1001.0 1000.0, 1001.0 1001.0, 1000.0 1001.0, 1000.0 1000.0), (1000.2 1000.2, 1000.8 1000.2, 1000.8 1000.8, 1000.2 1000.8, 1000.2 1000.2))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc b/docs/doc_examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc new file mode 100644 index 000000000..6a968f8e5 --- /dev/null +++ b/docs/doc_examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + cost_price: 100, + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + script_fields: { + sales_price: { + script: { + lang: "expression", + source: "doc['cost_price'] * markup", + params: { + markup: 0.2, + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/ab29bfbd35ee482cf54052b03d62cd31.asciidoc b/docs/doc_examples/ab29bfbd35ee482cf54052b03d62cd31.asciidoc new file mode 100644 index 000000000..338d6cf21 --- /dev/null +++ b/docs/doc_examples/ab29bfbd35ee482cf54052b03d62cd31.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + rings: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + unit: "km", + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 300, + }, + { + from: 300, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab317aa09c4bd44abbf02517141e37ef.asciidoc b/docs/doc_examples/ab317aa09c4bd44abbf02517141e37ef.asciidoc new file mode 100644 index 000000000..fa9f0fb4c --- /dev/null +++ b/docs/doc_examples/ab317aa09c4bd44abbf02517141e37ef.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: 
"my-index-000001", + mappings: { + properties: { + text: { + type: "text", + term_vector: "with_positions_offsets", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Quick brown fox", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match: { + text: "brown fox", + }, + }, + highlight: { + fields: { + text: {}, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ab3c36b70459093beafbfd3a7ae75b9b.asciidoc b/docs/doc_examples/ab3c36b70459093beafbfd3a7ae75b9b.asciidoc new file mode 100644 index 000000000..2e5783745 --- /dev/null +++ b/docs/doc_examples/ab3c36b70459093beafbfd3a7ae75b9b.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + date: "2015-10-01T05:30:00Z", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "true", + document: { + date: "2015-10-01T06:30:00Z", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "date", + calendar_interval: "day", + offset: "+6h", + format: "iso8601", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ab8b4537fad80107bc88f633d4039a52.asciidoc b/docs/doc_examples/ab8b4537fad80107bc88f633d4039a52.asciidoc new file mode 100644 index 000000000..54ed0255c --- /dev/null +++ b/docs/doc_examples/ab8b4537fad80107bc88f633d4039a52.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "logs", + aliases: { + "": {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc b/docs/doc_examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc new file mode 100644 index 000000000..520c175f2 --- /dev/null +++ b/docs/doc_examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.allocationExplain({ + index: "my-index-000001", + shard: 0, + primary: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/abb4a58089574211d434946a923e5725.asciidoc b/docs/doc_examples/abb4a58089574211d434946a923e5725.asciidoc new file mode 100644 index 000000000..705d8bb1f --- /dev/null +++ b/docs/doc_examples/abb4a58089574211d434946a923e5725.asciidoc @@ -0,0 +1,89 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_logs", + size: 0, + aggs: { + client_ip: { + composite: { + sources: [ + { + client_ip: { + terms: { + field: "clientip", + }, + }, + }, + ], + }, + aggs: { + url_dc: { + cardinality: { + field: "url.keyword", + }, + }, + bytes_sum: { + sum: { + field: "bytes", + }, + }, + geo_src_dc: { + cardinality: { 
+ field: "geo.src", + }, + }, + geo_dest_dc: { + cardinality: { + field: "geo.dest", + }, + }, + responses_total: { + value_count: { + field: "timestamp", + }, + }, + success: { + filter: { + term: { + response: "200", + }, + }, + }, + error404: { + filter: { + term: { + response: "404", + }, + }, + }, + error503: { + filter: { + term: { + response: "503", + }, + }, + }, + malicious_client_ip: { + inference: { + model_id: "malicious_clients_model", + buckets_path: { + response_count: "responses_total", + url_dc: "url_dc", + bytes_sum: "bytes_sum", + geo_src_dc: "geo_src_dc", + geo_dest_dc: "geo_dest_dc", + success: "success._count", + error404: "error404._count", + error503: "error503._count", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/abc280775734daa6cf2c28868e155d10.asciidoc b/docs/doc_examples/abc280775734daa6cf2c28868e155d10.asciidoc new file mode 100644 index 000000000..3ea4cf6b5 --- /dev/null +++ b/docs/doc_examples/abc280775734daa6cf2c28868e155d10.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "exams", + refresh: "true", + document: { + grade: [1, 2, 3], + weight: 2, + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "exams", + size: 0, + aggs: { + weighted_grade: { + weighted_avg: { + value: { + field: "grade", + }, + weight: { + field: "weight", + }, + }, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/abc496de5fd013099a134db369b34a8b.asciidoc b/docs/doc_examples/abc496de5fd013099a134db369b34a8b.asciidoc new file mode 100644 index 000000000..014375370 --- /dev/null +++ b/docs/doc_examples/abc496de5fd013099a134db369b34a8b.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + query: { + constant_score: { + filter: { + match: { + type: "hat", + }, + }, + }, + }, + aggs: { + hat_prices: { + sum: { + field: "price", + missing: 100, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/abc7a670a47516b58b6b07d7497b140c.asciidoc b/docs/doc_examples/abc7a670a47516b58b6b07d7497b140c.asciidoc new file mode 100644 index 000000000..061bd7255 --- /dev/null +++ b/docs/doc_examples/abc7a670a47516b58b6b07d7497b140c.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + constant_score: { + filter: { + bool: { + should: [ + { + range: { + my_date: { + gte: "now-1h", + lte: "now-1h/m", + }, + }, + }, + { + range: { + my_date: { + gt: "now-1h/m", + lt: "now/m", + }, + }, + }, + { + range: { + my_date: { + gte: "now/m", + lte: "now", + }, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc b/docs/doc_examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc new file mode 100644 index 000000000..f8ef5375f --- /dev/null +++ b/docs/doc_examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- 
+const response = await client.search({ + index: "test", + filter_path: "hits.total", + query: { + match: { + flag: "foo", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc b/docs/doc_examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc new file mode 100644 index 000000000..6963bfe68 --- /dev/null +++ b/docs/doc_examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getUser(); +console.log(response); +---- diff --git a/docs/doc_examples/abf329ebefaf58acd4ee30e685731499.asciidoc b/docs/doc_examples/abf329ebefaf58acd4ee30e685731499.asciidoc deleted file mode 100644 index f319e125b..000000000 --- a/docs/doc_examples/abf329ebefaf58acd4ee30e685731499.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'index_double', - body: { - mappings: { - properties: { - field: { - type: 'double' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc b/docs/doc_examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc new file mode 100644 index 000000000..0e3921a75 --- /dev/null +++ b/docs/doc_examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + text_similarity_reranker: { + retriever: { + text_similarity_reranker: { + retriever: { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + rank_window_size: 100, + field: "text", + inference_id: "my-rerank-model", + inference_text: + "What are the state of the art applications of AI in information retrieval?", + }, + }, + rank_window_size: 10, + field: "text", + inference_id: "my-other-more-expensive-rerank-model", + inference_text: + "Applications of Large Language Models in technology and their impact on user satisfaction", + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac366b9dda7040e743dee85335354094.asciidoc b/docs/doc_examples/ac366b9dda7040e743dee85335354094.asciidoc new file mode 100644 index 000000000..67c8ca406 --- /dev/null +++ b/docs/doc_examples/ac366b9dda7040e743dee85335354094.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: [ + { + type: "shingle", + min_shingle_size: 2, + max_shingle_size: 3, + }, + ], + text: "quick brown fox jumps", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac483996d479946d57c374c3a86b2621.asciidoc b/docs/doc_examples/ac483996d479946d57c374c3a86b2621.asciidoc new file mode 100644 index 000000000..efaf6b96b --- /dev/null +++ b/docs/doc_examples/ac483996d479946d57c374c3a86b2621.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_field: { + type: "search_as_you_type", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac497917ef707538198a8458ae3d5c6b.asciidoc b/docs/doc_examples/ac497917ef707538198a8458ae3d5c6b.asciidoc new file mode 100644 index 000000000..f89e8cf0d --- /dev/null +++ b/docs/doc_examples/ac497917ef707538198a8458ae3d5c6b.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + message: "this is a test", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac544eb247a29ca42aab13826ca88561.asciidoc b/docs/doc_examples/ac544eb247a29ca42aab13826ca88561.asciidoc deleted file mode 100644 index 6ea5217a4..000000000 --- a/docs/doc_examples/ac544eb247a29ca42aab13826ca88561.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'test', - id: '1', - body: { - script: { - source: 'if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }', - lang: 'painless', - params: { - tag: 'blue' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc b/docs/doc_examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc new file mode 100644 index 000000000..08b6a99c8 --- /dev/null +++ b/docs/doc_examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + my_range: { + type: "date_range", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + my_range: [ + { + gte: 1504224000000, + lte: 1504569600000, + }, + { + gte: "2017-09-01", + lte: "2017-09-10", + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/ac73895ca1882cd1ac65b1facfbb5c63.asciidoc b/docs/doc_examples/ac73895ca1882cd1ac65b1facfbb5c63.asciidoc new file mode 100644 index 000000000..7edc35273 --- /dev/null +++ b/docs/doc_examples/ac73895ca1882cd1ac65b1facfbb5c63.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + query: { + match: { + "user.id": "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac8328bc51fd396b3ce5f7ef3e1e73df.asciidoc b/docs/doc_examples/ac8328bc51fd396b3ce5f7ef3e1e73df.asciidoc new file mode 100644 index 000000000..b0ca64df0 --- /dev/null +++ b/docs/doc_examples/ac8328bc51fd396b3ce5f7ef3e1e73df.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.getRepository(); +console.log(response); +---- diff --git 
a/docs/doc_examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc b/docs/doc_examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc new file mode 100644 index 000000000..d3ec042d7 --- /dev/null +++ b/docs/doc_examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + flat_settings: "true", + transient: { + "indices.recovery.max_bytes_per_sec": "20mb", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc b/docs/doc_examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc new file mode 100644 index 000000000..38ee3b575 --- /dev/null +++ b/docs/doc_examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_src_only_repository", + repository: { + type: "source", + settings: { + delegate_type: "fs", + location: "my_backup_repository", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/acb10091ad335ddd15d71021aaf23c62.asciidoc b/docs/doc_examples/acb10091ad335ddd15d71021aaf23c62.asciidoc new file mode 100644 index 000000000..af0bba0f8 --- /dev/null +++ b/docs/doc_examples/acb10091ad335ddd15d71021aaf23c62.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + track_scores: true, + sort: [ + { + post_date: { + order: "desc", + }, + }, + { + name: "desc", + }, + { + age: "desc", + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/acb850c08f51226eadb75be09e336076.asciidoc b/docs/doc_examples/acb850c08f51226eadb75be09e336076.asciidoc new file mode 100644 index 000000000..9c0903785 --- /dev/null +++ b/docs/doc_examples/acb850c08f51226eadb75be09e336076.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.status({ + id: "FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc b/docs/doc_examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc new file mode 100644 index 000000000..2dcd961c6 --- /dev/null +++ b/docs/doc_examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + retriever: { + rule: { + retriever: { + standard: { + query: { + query_string: { + query: "puggles", + }, + }, + }, + }, + match_criteria: { + query_string: "puggles", + user_country: "us", + }, + ruleset_ids: ["my-ruleset"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/acc52da725a996ae696b00d9f818dfde.asciidoc b/docs/doc_examples/acc52da725a996ae696b00d9f818dfde.asciidoc new file mode 100644 index 000000000..17e1e0a8a --- /dev/null +++ 
b/docs/doc_examples/acc52da725a996ae696b00d9f818dfde.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "file-path-test", + analyzer: "custom_path_tree", + text: "/User/alice/photos/2017/05/16/my_photo1.jpg", +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "file-path-test", + analyzer: "custom_path_tree_reversed", + text: "/User/alice/photos/2017/05/16/my_photo1.jpg", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc b/docs/doc_examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc new file mode 100644 index 000000000..3224e514b --- /dev/null +++ b/docs/doc_examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + query: "city.\\*:(this AND that OR thus)", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc b/docs/doc_examples/ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc index ceac02596..36fb42740 100644 --- a/docs/doc_examples/ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc +++ b/docs/doc_examples/ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc @@ -4,21 +4,13 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'Jon', - type: 'cross_fields', - fields: [ - 'first', - 'first.edge', - 'last', - 'last.edge' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "Jon", + type: "cross_fields", + fields: ["first", "first.edge", "last", "last.edge"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/ad2416ca0581316cee6c63129685bca5.asciidoc b/docs/doc_examples/ad2416ca0581316cee6c63129685bca5.asciidoc new file mode 100644 index 000000000..8b68bcfdb --- /dev/null +++ b/docs/doc_examples/ad2416ca0581316cee6c63129685bca5.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["title", "content"], + query: "this OR that OR thus", + minimum_should_match: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc b/docs/doc_examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc new file mode 100644 index 000000000..3768cd346 --- /dev/null +++ b/docs/doc_examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-image-index", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + "image-vector": [1, 5, -20], + "file-type": "jpg", + title: "mountain lake", + }, + { + index: { + _id: "2", + }, + }, + { + "image-vector": [42, 8, -15], + "file-type": "png", + title: "frozen lake", + }, + { + index: { + _id: "3", + }, + }, + { + "image-vector": [15, 11, 23], + "file-type": "jpg", + title: "mountain lake lodge", + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc b/docs/doc_examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc new file mode 100644 index 000000000..d855f164e --- /dev/null +++ b/docs/doc_examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.count({ + index: "my-index-000001", + q: "user:kimchy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad57ccba0a060da4f5313692fa26a235.asciidoc b/docs/doc_examples/ad57ccba0a060da4f5313692fa26a235.asciidoc new file mode 100644 index 000000000..9a83c4ccd --- /dev/null +++ b/docs/doc_examples/ad57ccba0a060da4f5313692fa26a235.asciidoc @@ -0,0 +1,72 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + date: { + type: "date_nanos", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + date: "2015-01-01", + }, + { + index: { + _id: "2", + }, + }, + { + date: "2015-01-01T12:10:30.123456789Z", + }, + { + index: { + _id: "3", + }, + }, + { + date: 1420070400000, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + sort: { + date: "asc", + }, + runtime_mappings: { + date_has_nanos: { + type: "boolean", + script: "emit(doc['date'].value.nano != 0)", + }, + }, + fields: [ + { + field: "date", + format: "strict_date_optional_time_nanos", + }, + { + field: "date_has_nanos", + }, + ], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ad63eca6829a25293c9be589c1870547.asciidoc b/docs/doc_examples/ad63eca6829a25293c9be589c1870547.asciidoc new file mode 100644 index 000000000..c3d0e495e --- /dev/null +++ b/docs/doc_examples/ad63eca6829a25293c9be589c1870547.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_moving_sum: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.sum(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad6d81be5fad4bad87486b699454dce5.asciidoc b/docs/doc_examples/ad6d81be5fad4bad87486b699454dce5.asciidoc new file mode 100644 index 000000000..6abd80ac6 --- /dev/null +++ b/docs/doc_examples/ad6d81be5fad4bad87486b699454dce5.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "node_upgrade", + size: 0, + aggs: { + startup_time_ttest: { + t_test: { + a: { + field: "startup_time_before", + }, + b: { + field: "startup_time_after", + }, + type: "paired", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad6ea0c1e46712aa1fd6d3bfa0ec979e.asciidoc b/docs/doc_examples/ad6ea0c1e46712aa1fd6d3bfa0ec979e.asciidoc deleted file mode 
100644 index 79e77abd8..000000000 --- a/docs/doc_examples/ad6ea0c1e46712aa1fd6d3bfa0ec979e.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - query: '(new york city) OR (big apple)', - default_field: 'content' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ad79228630684d950fe9792a768d24c5.asciidoc b/docs/doc_examples/ad79228630684d950fe9792a768d24c5.asciidoc deleted file mode 100644 index 7911e5d56..000000000 --- a/docs/doc_examples/ad79228630684d950fe9792a768d24c5.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test', - alias: 'alias1', - is_write_index: false - } - }, - { - add: { - index: 'test2', - alias: 'alias1', - is_write_index: true - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ad88e46bb06739991498dee248850223.asciidoc b/docs/doc_examples/ad88e46bb06739991498dee248850223.asciidoc new file mode 100644 index 000000000..e9db96801 --- /dev/null +++ b/docs/doc_examples/ad88e46bb06739991498dee248850223.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool(); +console.log(response); +---- diff --git a/docs/doc_examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc b/docs/doc_examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc new file mode 100644 index 000000000..03f452f53 --- /dev/null +++ b/docs/doc_examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + id: "my-search-template", + params: { + query_string: "hello world", + from: 20, + size: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc b/docs/doc_examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc new file mode 100644 index 000000000..ca15245b8 --- /dev/null +++ b/docs/doc_examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", + inference_config: { + service: "elasticsearch", + service_settings: { + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 1, + max_number_of_allocations: 4, + }, + num_threads: 1, + model_id: ".elser_model_2", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ada2675a9c631da2bfe627fc2618f5ed.asciidoc b/docs/doc_examples/ada2675a9c631da2bfe627fc2618f5ed.asciidoc new file mode 100644 index 000000000..a7f2f8f65 --- /dev/null +++ b/docs/doc_examples/ada2675a9c631da2bfe627fc2618f5ed.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.search({ + query: { + script_score: { + query: { + match: { + message: "elasticsearch", + }, + }, + script: { + source: "doc['my-int'].value / 10 ", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc b/docs/doc_examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc new file mode 100644 index 000000000..27998d47c --- /dev/null +++ b/docs/doc_examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.msearch({ + index: "my-index-000001", + searches: [ + {}, + { + query: { + match_all: {}, + }, + from: 0, + size: 10, + }, + {}, + { + query: { + match_all: {}, + }, + }, + { + index: "my-index-000002", + }, + { + query: { + match_all: {}, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc b/docs/doc_examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc new file mode 100644 index 000000000..b7d64b4c8 --- /dev/null +++ b/docs/doc_examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getMigrateReindexStatus({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/add240aa149d8b11139947502b279ee0.asciidoc b/docs/doc_examples/add240aa149d8b11139947502b279ee0.asciidoc new file mode 100644 index 000000000..c77295769 --- /dev/null +++ b/docs/doc_examples/add240aa149d8b11139947502b279ee0.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.scroll({ + scroll: "1m", + scroll_id: "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", +}); +console.log(response); +---- diff --git a/docs/doc_examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc b/docs/doc_examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc new file mode 100644 index 000000000..f924148c0 --- /dev/null +++ b/docs/doc_examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + multi_match: { + query: "vegetarian curry", + fields: ["title^3", "description^2", "tags"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc b/docs/doc_examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc new file mode 100644 index 000000000..a5724dbaa --- /dev/null +++ b/docs/doc_examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.enableUser({ + username: "jacknich", +}); +console.log(response); +---- diff --git a/docs/doc_examples/adf728b0c11c5c309c730205609a379d.asciidoc b/docs/doc_examples/adf728b0c11c5c309c730205609a379d.asciidoc new file mode 100644 index 000000000..dcdc44c12 --- /dev/null +++ 
b/docs/doc_examples/adf728b0c11c5c309c730205609a379d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "Set dynamic '<service>' field to 'code' value", + field: "{{{service}}}", + value: "{{{code}}}", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae0d20c2ebb59278e08a26c9634d90c9.asciidoc b/docs/doc_examples/ae0d20c2ebb59278e08a26c9634d90c9.asciidoc new file mode 100644 index 000000000..da6f2a257 --- /dev/null +++ b/docs/doc_examples/ae0d20c2ebb59278e08a26c9634d90c9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.create({ + repository: "my_repository", + snapshot: "<my_snapshot_{now/d}>", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc b/docs/doc_examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc new file mode 100644 index 000000000..67a3d5d75 --- /dev/null +++ b/docs/doc_examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.put({ + connector_id: "my-{service-name-stub}-connector", + index_name: "my-elasticsearch-index", + name: "Content synced from {service-name}", + service_type: "{service-name-stub}", + is_native: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae398a6b6494e7982ef2549fc2cd2d8e.asciidoc b/docs/doc_examples/ae398a6b6494e7982ef2549fc2cd2d8e.asciidoc new file mode 100644 index 000000000..b3ba17b23 --- /dev/null +++ b/docs/doc_examples/ae398a6b6494e7982ef2549fc2cd2d8e.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + full_name: { + path_match: ["name.*", "user.name.*"], + path_unmatch: ["*.middle", "*.midinitial"], + mapping: { + type: "text", + copy_to: "full_name", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + name: { + first: "John", + middle: "Winston", + last: "Lennon", + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + user: { + name: { + first: "Jane", + midinitial: "M", + last: "Salazar", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ae4aa368617637a390074535df86e64b.asciidoc b/docs/doc_examples/ae4aa368617637a390074535df86e64b.asciidoc new file mode 100644 index 000000000..4c1c1e246 --- /dev/null +++ b/docs/doc_examples/ae4aa368617637a390074535df86e64b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.setUpgradeMode({ + enabled: "true", + timeout: "10m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae591d49e54b838c15cdcf64a8dee9c2.asciidoc
b/docs/doc_examples/ae591d49e54b838c15cdcf64a8dee9c2.asciidoc new file mode 100644 index 000000000..0f73de942 --- /dev/null +++ b/docs/doc_examples/ae591d49e54b838c15cdcf64a8dee9c2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_docs: 10000000, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc b/docs/doc_examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc new file mode 100644 index 000000000..4404c4b26 --- /dev/null +++ b/docs/doc_examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "student_performance_mathematics_0.3", + source: { + index: "student_performance_mathematics", + }, + dest: { + index: "student_performance_mathematics_reg", + }, + analysis: { + regression: { + dependent_variable: "G3", + training_percent: 70, + randomize_seed: 19673948271, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ae9b5fbd42af2386ffbf56ad4a697e51.asciidoc b/docs/doc_examples/ae9b5fbd42af2386ffbf56ad4a697e51.asciidoc deleted file mode 100644 index 44685f225..000000000 --- a/docs/doc_examples/ae9b5fbd42af2386ffbf56ad4a697e51.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - body: { - sort: [ - { - post_date: { - order: 'asc' - } - }, - 'user', - { - name: 'desc' - }, - { - age: 'desc' - }, - '_score' - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc b/docs/doc_examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc index fd3601872..5b92e389a 100644 --- a/docs/doc_examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc +++ b/docs/doc_examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc @@ -4,44 +4,43 @@ [source, js] ---- const response = await client.bulk({ - body: [ + operations: [ { index: { - _index: 'test', - _id: '1' - } + _index: "test", + _id: "1", + }, }, { - field1: 'value1' + field1: "value1", }, { delete: { - _index: 'test', - _id: '2' - } + _index: "test", + _id: "2", + }, }, { create: { - _index: 'test', - _id: '3' - } + _index: "test", + _id: "3", + }, }, { - field1: 'value3' + field1: "value3", }, { update: { - _id: '1', - _index: 'test' - } + _id: "1", + _index: "test", + }, }, { doc: { - field2: 'value2' - } - } - ] -}) -console.log(response) + field2: "value2", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc b/docs/doc_examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc new file mode 100644 index 000000000..0c43a303d --- /dev/null +++ b/docs/doc_examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", 
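+ // fetch term vectors for document 1, including offsets, positions, and term/field statistics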
+ id: 1, + fields: ["text", "some_field_without_term_vectors"], + offsets: true, + positions: true, + term_statistics: true, + field_statistics: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aebf9cc593fcf0d4ca08f8b61b67bf17.asciidoc b/docs/doc_examples/aebf9cc593fcf0d4ca08f8b61b67bf17.asciidoc new file mode 100644 index 000000000..af5ed0954 --- /dev/null +++ b/docs/doc_examples/aebf9cc593fcf0d4ca08f8b61b67bf17.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_backup", + repository: { + type: "azure", + settings: { + client: "secondary", + container: "my_container", + base_path: "snapshots_prefix", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aee26dd62fbb6d614a0798f3344c0598.asciidoc b/docs/doc_examples/aee26dd62fbb6d614a0798f3344c0598.asciidoc new file mode 100644 index 000000000..8b4ea58cc --- /dev/null +++ b/docs/doc_examples/aee26dd62fbb6d614a0798f3344c0598.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "issues", + query: { + match_all: {}, + }, + aggs: { + comments: { + nested: { + path: "comments", + }, + aggs: { + top_usernames: { + terms: { + field: "comments.username", + }, + aggs: { + comment_to_issue: { + reverse_nested: {}, + aggs: { + top_tags_per_comment: { + terms: { + field: "tags", + }, + }, + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc b/docs/doc_examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc new file mode 100644 index 000000000..36cbdb71c --- /dev/null +++ b/docs/doc_examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": [-70, 40], + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af00a58d9171d32f6efe52d94e51e526.asciidoc b/docs/doc_examples/af00a58d9171d32f6efe52d94e51e526.asciidoc new file mode 100644 index 000000000..bc40fbb31 --- /dev/null +++ b/docs/doc_examples/af00a58d9171d32f6efe52d94e51e526.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "hindi_example", + settings: { + analysis: { + filter: { + hindi_stop: { + type: "stop", + stopwords: "_hindi_", + }, + hindi_keywords: { + type: "keyword_marker", + keywords: ["उदाहरण"], + }, + hindi_stemmer: { + type: "stemmer", + language: "hindi", + }, + }, + analyzer: { + rebuilt_hindi: { + tokenizer: "standard", + filter: [ + "lowercase", + "decimal_digit", + "hindi_keywords", + "indic_normalization", + "hindi_normalization", + "hindi_stop", + "hindi_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc b/docs/doc_examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc new file mode 100644 index 
000000000..73aaca9ec --- /dev/null +++ b/docs/doc_examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.getPolicy({ + name: "my-policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/af3fb9fa5691a7b37a6dc2a69ff66e64.asciidoc b/docs/doc_examples/af3fb9fa5691a7b37a6dc2a69ff66e64.asciidoc deleted file mode 100644 index e35a78b95..000000000 --- a/docs/doc_examples/af3fb9fa5691a7b37a6dc2a69ff66e64.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - remove: { - index: 'test1', - alias: 'alias1' - } - }, - { - add: { - index: 'test1', - alias: 'alias2' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/af44cc7fb0c435d4497c77baf904bf5e.asciidoc b/docs/doc_examples/af44cc7fb0c435d4497c77baf904bf5e.asciidoc new file mode 100644 index 000000000..b56c54770 --- /dev/null +++ b/docs/doc_examples/af44cc7fb0c435d4497c77baf904bf5e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af517b6936fa41d124d68b107b2efdc3.asciidoc b/docs/doc_examples/af517b6936fa41d124d68b107b2efdc3.asciidoc new file mode 100644 index 000000000..e43a41a04 --- /dev/null +++ b/docs/doc_examples/af517b6936fa41d124d68b107b2efdc3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.deleteLifecycle({ + name: "my_policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/af607715d0693587dd12962266359a96.asciidoc b/docs/doc_examples/af607715d0693587dd12962266359a96.asciidoc new file mode 100644 index 000000000..cb25ed36d --- /dev/null +++ b/docs/doc_examples/af607715d0693587dd12962266359a96.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_s3_repository", + repository: { + type: "s3", + settings: { + bucket: "my-bucket", + another_setting: "setting-value", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af746266a49a693ff6170c88da8a8c04.asciidoc b/docs/doc_examples/af746266a49a693ff6170c88da8a8c04.asciidoc new file mode 100644 index 000000000..0cb4cb18f --- /dev/null +++ b/docs/doc_examples/af746266a49a693ff6170c88da8a8c04.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + default: { + tokenizer: "whitespace", + filter: ["my_custom_stop_words_filter"], + }, + }, + filter: { + my_custom_stop_words_filter: { 
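+ // custom "stop" filter: drops the listed stopwords, matching case-insensitively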
+ type: "stop", + ignore_case: true, + stopwords: ["and", "is", "the"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af7c5add165b005aefb552d79130fed6.asciidoc b/docs/doc_examples/af7c5add165b005aefb552d79130fed6.asciidoc new file mode 100644 index 000000000..b1d4507ce --- /dev/null +++ b/docs/doc_examples/af7c5add165b005aefb552d79130fed6.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + geo_grid: { + location: { + geotile: "6/32/22", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af84b3995564a7ca84360a526a4ac896.asciidoc b/docs/doc_examples/af84b3995564a7ca84360a526a4ac896.asciidoc new file mode 100644 index 000000000..ff6a42618 --- /dev/null +++ b/docs/doc_examples/af84b3995564a7ca84360a526a4ac896.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "5_char_words_example", + settings: { + analysis: { + analyzer: { + lowercase_5_char: { + tokenizer: "lowercase", + filter: ["5_char_trunc"], + }, + }, + filter: { + "5_char_trunc": { + type: "truncate", + length: 5, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af85ad2551d1cc6742c6521d71c889cc.asciidoc b/docs/doc_examples/af85ad2551d1cc6742c6521d71c889cc.asciidoc new file mode 100644 index 000000000..ef4dfe564 --- /dev/null +++ b/docs/doc_examples/af85ad2551d1cc6742c6521d71c889cc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + title: { + type: "text", + analyzer: "whitespace", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/af91019991bee136df5460e2fd4ac72a.asciidoc b/docs/doc_examples/af91019991bee136df5460e2fd4ac72a.asciidoc new file mode 100644 index 000000000..c45e9cde5 --- /dev/null +++ b/docs/doc_examples/af91019991bee136df5460e2fd4ac72a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", + lazy: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc b/docs/doc_examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc new file mode 100644 index 000000000..a107b8955 --- /dev/null +++ b/docs/doc_examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores({ + index: "test1,test2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc b/docs/doc_examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc new file mode 100644 index 000000000..663d97eb6 --- /dev/null +++ b/docs/doc_examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-data-stream", + size: 0, + aggs: { + tsid: { + terms: { + field: "_tsid", + }, + aggs: { + over_time: { + date_histogram: { + field: "@timestamp", + fixed_interval: "1d", + }, + aggs: { + min: { + min: { + field: "kubernetes.container.memory.usage.bytes", + }, + }, + max: { + max: { + field: "kubernetes.container.memory.usage.bytes", + }, + }, + avg: { + avg: { + field: "kubernetes.container.memory.usage.bytes", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/afa24b7d72c2d9f586023a49bd655ec7.asciidoc b/docs/doc_examples/afa24b7d72c2d9f586023a49bd655ec7.asciidoc new file mode 100644 index 000000000..0ed57946a --- /dev/null +++ b/docs/doc_examples/afa24b7d72c2d9f586023a49bd655ec7.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "my-data-stream", + runtime_mappings: { + "source.ip": { + type: "ip", + script: + "\n String sourceip=grok('%{IPORHOST:sourceip} .*').extract(doc[ \"message\" ].value)?.sourceip;\n if (sourceip != null) emit(sourceip);\n ", + }, + }, + query: { + bool: { + filter: [ + { + range: { + "@timestamp": { + gte: "now-2y/d", + lt: "now/d", + }, + }, + }, + { + range: { + "source.ip": { + gte: "192.0.2.0", + lte: "192.0.2.255", + }, + }, + }, + ], + }, + }, + fields: ["*"], + _source: false, + sort: [ + { + "@timestamp": "desc", + }, + { + "source.ip": "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/afadb6bb7d0fa5a4531708af1ea8f9f8.asciidoc b/docs/doc_examples/afadb6bb7d0fa5a4531708af1ea8f9f8.asciidoc new file mode 100644 index 000000000..3ee21ca53 --- /dev/null +++ b/docs/doc_examples/afadb6bb7d0fa5a4531708af1ea8f9f8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "mylogs-*", + }, + dest: { + index: "mylogs", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc b/docs/doc_examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc new file mode 100644 index 000000000..6e0d97dd7 --- /dev/null +++ b/docs/doc_examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.delete({ + name: "my-app", +}); +console.log(response); +---- diff --git a/docs/doc_examples/afc0a9cffc0100797a3f093094394763.asciidoc b/docs/doc_examples/afc0a9cffc0100797a3f093094394763.asciidoc new file mode 100644 index 000000000..9129b70e3 --- /dev/null +++ b/docs/doc_examples/afc0a9cffc0100797a3f093094394763.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlInvalidate({ + query_string: + 
"SAMLRequest=nZFda4MwFIb%2FiuS%2BmviRpqFaClKQdbvo2g12M2KMraCJ9cRR9utnW4Wyi13sMie873MeznJ1aWrnS3VQGR0j4mLkKC1NUeljjA77zYyhVbIE0dR%2By7fmaHq7U%2BdegXWGpAZ%2B%2F4pR32luBFTAtWgUcCv56%2Fp5y30X87Yz1khTIycdgpUW9kY7WdsC9zxoXTvMvWuVV98YyMnSGH2SYE5pwALBIr9QKiwDGpW0oGVUznGeMyJZKFkQ4jBf5HnhUymjIhzCAL3KNFihbYx8TBYzzGaY7EnIyZwHzCWMfiDnbRIftkSjJr%2BFu0e9v%2B0EgOquRiiZjKpiVFp6j50T4WXoyNJ%2FEWC9fdqc1t%2F1%2B2F3aUpjzhPiXpqMz1%2FHSn4A&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=MsAYz2NFdovMG2mXf6TSpu5vlQQyEJAg%2B4KCwBqJTmrb3yGXKUtIgvjqf88eCAK32v3eN8vupjPC8LglYmke1ZnjK0%2FKxzkvSjTVA7mMQe2AQdKbkyC038zzRq%2FYHcjFDE%2Bz0qISwSHZY2NyLePmwU7SexEXnIz37jKC6NMEhus%3D", + realm: "saml1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/afc29b61c532cf683f749baf013e7bfe.asciidoc b/docs/doc_examples/afc29b61c532cf683f749baf013e7bfe.asciidoc deleted file mode 100644 index 44d30f594..000000000 --- a/docs/doc_examples/afc29b61c532cf683f749baf013e7bfe.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putMapping({ - index: 'my_index', - body: { - properties: { - user_id: { - type: 'alias', - path: 'user_identifier' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/afcacd742d18bf220e02f0bc6891526d.asciidoc b/docs/doc_examples/afcacd742d18bf220e02f0bc6891526d.asciidoc new file mode 100644 index 000000000..2fb8b47c3 --- /dev/null +++ b/docs/doc_examples/afcacd742d18bf220e02f0bc6891526d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sale_date: { + auto_date_histogram: { + field: "date", + buckets: 10, + minimum_interval: "minute", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/afd90d268187f995dc002abc189f818d.asciidoc b/docs/doc_examples/afd90d268187f995dc002abc189f818d.asciidoc new file mode 100644 index 000000000..ce066008c --- /dev/null +++ b/docs/doc_examples/afd90d268187f995dc002abc189f818d.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + format: "yyyy-MM-dd", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/afdb19ad1ebb4f64e235528b640817b6.asciidoc b/docs/doc_examples/afdb19ad1ebb4f64e235528b640817b6.asciidoc new file mode 100644 index 000000000..8ed6c634a --- /dev/null +++ b/docs/doc_examples/afdb19ad1ebb4f64e235528b640817b6.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + drop: { + description: "Drop documents with 'network.name' of 'Guest'", + if: "ctx?.network?.name == 'Guest'", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/afe30f159937b38d74c869570cfcd369.asciidoc b/docs/doc_examples/afe30f159937b38d74c869570cfcd369.asciidoc new 
file mode 100644 index 000000000..699718a56 --- /dev/null +++ b/docs/doc_examples/afe30f159937b38d74c869570cfcd369.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.close({ + index: "index_1", +}); +console.log(response); + +const response1 = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "snapshot_2", + wait_for_completion: "true", + indices: "index_1", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/afe5aeb9317f0ae470b28e85a8d98274.asciidoc b/docs/doc_examples/afe5aeb9317f0ae470b28e85a8d98274.asciidoc new file mode 100644 index 000000000..e5cb83536 --- /dev/null +++ b/docs/doc_examples/afe5aeb9317f0ae470b28e85a8d98274.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + status_code: { + type: "keyword", + null_value: "NULL", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + status_code: null, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + status_code: [], + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + term: { + status_code: "NULL", + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/afe87a2850326e0328fbebbefec2e839.asciidoc b/docs/doc_examples/afe87a2850326e0328fbebbefec2e839.asciidoc new file mode 100644 index 000000000..4cdf9bf3e --- /dev/null +++ b/docs/doc_examples/afe87a2850326e0328fbebbefec2e839.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchShards({ + index: "my-index-000001", + routing: "foo,bar", +}); +console.log(response); +---- diff --git a/docs/doc_examples/afef5cac988592b97ae289ab39c2f437.asciidoc b/docs/doc_examples/afef5cac988592b97ae289ab39c2f437.asciidoc new file mode 100644 index 000000000..95eab5366 --- /dev/null +++ b/docs/doc_examples/afef5cac988592b97ae289ab39c2f437.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_field: { + type: "text", + fields: { + keyword: { + type: "keyword", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/affc7ff234dc3acccb2bf7dc51f54813.asciidoc b/docs/doc_examples/affc7ff234dc3acccb2bf7dc51f54813.asciidoc new file mode 100644 index 000000000..e7f29a690 --- /dev/null +++ b/docs/doc_examples/affc7ff234dc3acccb2bf7dc51f54813.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + char_filter: ["html_strip"], + text: "<p>I&apos;m so <b>happy</b>!</p>
", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b00ac39faf96785e89be8d4205fb984d.asciidoc b/docs/doc_examples/b00ac39faf96785e89be8d4205fb984d.asciidoc new file mode 100644 index 000000000..3aa692211 --- /dev/null +++ b/docs/doc_examples/b00ac39faf96785e89be8d4205fb984d.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", + params: { + text: true, + size: 5, + query_string: "mountain climbing", + text_fields: [ + { + name: "title", + boost: 10, + }, + { + name: "description", + boost: 5, + }, + { + name: "state", + boost: 1, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b00d74eed431a272c829c0f798e3a539.asciidoc b/docs/doc_examples/b00d74eed431a272c829c0f798e3a539.asciidoc new file mode 100644 index 000000000..01b12cb2d --- /dev/null +++ b/docs/doc_examples/b00d74eed431a272c829c0f798e3a539.asciidoc @@ -0,0 +1,84 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + d: { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "test", + refresh: "true", + operations: [ + { + index: {}, + }, + { + s: 1, + m: 3.1415, + i: 1, + d: "2020-01-01T00:12:12Z", + t: "cat", + }, + { + index: {}, + }, + { + s: 2, + m: 1, + i: 6, + d: "2020-01-02T00:12:12Z", + t: "dog", + }, + { + index: {}, + }, + { + s: 3, + m: 2.71828, + i: -12, + d: "2019-12-31T00:12:12Z", + t: "chicken", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "test", + filter_path: "aggregations", + aggs: { + tm: { + top_metrics: { + metrics: [ + { + field: "m", + }, + { + field: "i", + }, + { + field: "d", + }, + { + field: "t.keyword", + }, + ], + sort: { + s: "desc", + }, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b00f3bc0e47905aaa2124d6a025c75d4.asciidoc b/docs/doc_examples/b00f3bc0e47905aaa2124d6a025c75d4.asciidoc new file mode 100644 index 000000000..7e6d9a983 --- /dev/null +++ b/docs/doc_examples/b00f3bc0e47905aaa2124d6a025c75d4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library ORDER BY page_count DESC LIMIT 5", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc b/docs/doc_examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc index f3eb9c277..f3456d0d4 100644 --- a/docs/doc_examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc +++ b/docs/doc_examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc @@ -3,7 +3,6 @@ [source, js] ---- -const response = await client.cluster.health() -console.log(response) +const response = await client.cluster.health(); +console.log(response); ---- - diff --git a/docs/doc_examples/b09f155602f9b2a6c40fe7c4a5436b7a.asciidoc b/docs/doc_examples/b09f155602f9b2a6c40fe7c4a5436b7a.asciidoc new file mode 100644 index 000000000..ef0a0529b --- /dev/null +++ b/docs/doc_examples/b09f155602f9b2a6c40fe7c4a5436b7a.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// 
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + runtime_mappings: { + day_of_week: { + type: "keyword", + script: + "\n emit(doc['timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ENGLISH))\n ", + }, + }, + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + dow: { + terms: { + field: "day_of_week", + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b0b1ae9582599f501f3b3ed8a42ea2af.asciidoc b/docs/doc_examples/b0b1ae9582599f501f3b3ed8a42ea2af.asciidoc new file mode 100644 index 000000000..98f315c75 --- /dev/null +++ b/docs/doc_examples/b0b1ae9582599f501f3b3ed8a42ea2af.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "circles", + id: 1, + pipeline: "polygonize_circles", + document: { + circle: "CIRCLE (30 10 40)", + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "circles", + id: 1, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc new file mode 100644 index 000000000..82a81bced --- /dev/null +++ b/docs/doc_examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.renderQuery({ + name: "my_search_application", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b0ce54ff4fec0b0c712506eb81e633f4.asciidoc b/docs/doc_examples/b0ce54ff4fec0b0c712506eb81e633f4.asciidoc new file mode 100644 index 000000000..ec64702c5 --- /dev/null +++ b/docs/doc_examples/b0ce54ff4fec0b0c712506eb81e633f4.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "monthly date-time index naming", + processors: [ + { + date_index_name: { + field: "date1", + index_name_prefix: "my-index-", + date_rounding: "M", + }, + }, + ], + }, + docs: [ + { + _source: { + date1: "2016-04-25T12:02:01.789Z", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b0d3f839237fabf8cdc2221734c668ad.asciidoc b/docs/doc_examples/b0d3f839237fabf8cdc2221734c668ad.asciidoc new file mode 100644 index 000000000..58041fb5e --- /dev/null +++ b/docs/doc_examples/b0d3f839237fabf8cdc2221734c668ad.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "items", + id: 1, + refresh: "true", + document: { + name: "chocolate", + production_date: "2018-02-01", + location: [-71.34, 41.12], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "items", + id: 2, + refresh: "true", + document: { + name: "chocolate", + production_date: "2018-01-01", + location: [-71.3, 41.15], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "items", + id: 3, + refresh: "true", + document: { + name: "chocolate", + 
production_date: "2017-12-01", + location: [-71.3, 41.12], + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b0eaf67e5cce24ef8889bf20951ccec1.asciidoc b/docs/doc_examples/b0eaf67e5cce24ef8889bf20951ccec1.asciidoc index 02f53cb9a..a3b9fd534 100644 --- a/docs/doc_examples/b0eaf67e5cce24ef8889bf20951ccec1.asciidoc +++ b/docs/doc_examples/b0eaf67e5cce24ef8889bf20951ccec1.asciidoc @@ -4,26 +4,23 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - dis_max: { - queries: [ - { - match: { - subject: 'brown fox' - } + query: { + dis_max: { + queries: [ + { + match: { + subject: "brown fox", }, - { - match: { - message: 'brown fox' - } - } - ], - tie_breaker: 0.3 - } - } - } -}) -console.log(response) + }, + { + match: { + message: "brown fox", + }, + }, + ], + tie_breaker: 0.3, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/b0ec418bf416c62bed602b0a32a6d5f5.asciidoc b/docs/doc_examples/b0ec418bf416c62bed602b0a32a6d5f5.asciidoc deleted file mode 100644 index 61ba1d017..000000000 --- a/docs/doc_examples/b0ec418bf416c62bed602b0a32a6d5f5.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'alias1', - id: '1', - body: { - foo: 'bar' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc b/docs/doc_examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc new file mode 100644 index 000000000..abe2a362d --- /dev/null +++ b/docs/doc_examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "geoip", + description: "Add ip geolocation info", + processors: [ + { + geoip: { + field: "ip", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "geoip", + document: { + ip: "89.160.20.128", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc b/docs/doc_examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc new file mode 100644 index 000000000..8c4ef9868 --- /dev/null +++ b/docs/doc_examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test", + id: 1, + document: { + counter: 1, + tags: ["red"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc b/docs/doc_examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc new file mode 100644 index 000000000..4137f344f --- /dev/null +++ b/docs/doc_examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ + task_id: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b109d0141ec8a0aed5d3805abc349a20.asciidoc 
b/docs/doc_examples/b109d0141ec8a0aed5d3805abc349a20.asciidoc new file mode 100644 index 000000000..5a3bb37ba --- /dev/null +++ b/docs/doc_examples/b109d0141ec8a0aed5d3805abc349a20.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.linearWeightedAvg(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b11a0675e49df0709be693297ca73a2c.asciidoc b/docs/doc_examples/b11a0675e49df0709be693297ca73a2c.asciidoc new file mode 100644 index 000000000..85c89bc6f --- /dev/null +++ b/docs/doc_examples/b11a0675e49df0709be693297ca73a2c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.xpack.info({ + categories: "build,features", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b14122481ae1f158f1a9a1bfbc4a41b1.asciidoc b/docs/doc_examples/b14122481ae1f158f1a9a1bfbc4a41b1.asciidoc new file mode 100644 index 000000000..09bd844ad --- /dev/null +++ b/docs/doc_examples/b14122481ae1f158f1a9a1bfbc4a41b1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.reloadSecureSettings({ + secure_settings_password: "keystore-password", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b16700002af3aa70639f3e88c733bf35.asciidoc b/docs/doc_examples/b16700002af3aa70639f3e88c733bf35.asciidoc new file mode 100644 index 000000000..87a17c886 --- /dev/null +++ b/docs/doc_examples/b16700002af3aa70639f3e88c733bf35.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.openPointInTime({ + index: "my-index-000001", + keep_alive: "1m", + allow_partial_search_results: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b17143780e9904bfc1e1c53436497fa1.asciidoc b/docs/doc_examples/b17143780e9904bfc1e1c53436497fa1.asciidoc new file mode 100644 index 000000000..91bbd6972 --- /dev/null +++ b/docs/doc_examples/b17143780e9904bfc1e1c53436497fa1.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + wait_for_completion_timeout: "2s", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b176e0d428726705298184ef39ad5cb2.asciidoc b/docs/doc_examples/b176e0d428726705298184ef39ad5cb2.asciidoc new file mode 100644 index 000000000..06c244807 --- /dev/null +++ b/docs/doc_examples/b176e0d428726705298184ef39ad5cb2.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.security.putRoleMapping({ + name: "mapping2", + roles: ["user", "admin"], + enabled: true, + rules: { + field: { + username: ["esadmin01", "esadmin02"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b195068563b1dc0f721f5f8c8d172312.asciidoc b/docs/doc_examples/b195068563b1dc0f721f5f8c8d172312.asciidoc new file mode 100644 index 000000000..894dc8c79 --- /dev/null +++ b/docs/doc_examples/b195068563b1dc0f721f5f8c8d172312.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: "MULTIPOINT (1002.0 2000.0, 1003.0 2000.0)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc b/docs/doc_examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc new file mode 100644 index 000000000..9c3d8a489 --- /dev/null +++ b/docs/doc_examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.asyncQueryStop({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc b/docs/doc_examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc new file mode 100644 index 000000000..58a34e6ca --- /dev/null +++ b/docs/doc_examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ",,", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc b/docs/doc_examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc index 135a0872e..096b0c769 100644 --- a/docs/doc_examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc +++ b/docs/doc_examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc @@ -4,16 +4,13 @@ [source, js] ---- const response = await client.reindex({ - body: { - source: { - index: 'source' - }, - dest: { - index: 'dest', - pipeline: 'some_ingest_pipeline' - } - } -}) -console.log(response) + source: { + index: "source", + }, + dest: { + index: "dest", + pipeline: "some_ingest_pipeline", + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/b1f7cb4157b13368373383abd7d2b8cb.asciidoc b/docs/doc_examples/b1f7cb4157b13368373383abd7d2b8cb.asciidoc new file mode 100644 index 000000000..0243ebab2 --- /dev/null +++ b/docs/doc_examples/b1f7cb4157b13368373383abd7d2b8cb.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_two: { + "transport.compress": false, + }, + cluster_three: { + "transport.compress": true, + "transport.ping_schedule": "60s", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b214942b938e47f2c486e523546cb574.asciidoc b/docs/doc_examples/b214942b938e47f2c486e523546cb574.asciidoc deleted file mode 100644 index 009fd0fcc..000000000 --- a/docs/doc_examples/b214942b938e47f2c486e523546cb574.asciidoc +++ /dev/null @@ -1,29 
+0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - body: { - query: { - bool: { - must: [ - { - match: { - 'user.first': 'Alice' - } - }, - { - match: { - 'user.last': 'Smith' - } - } - ] - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc b/docs/doc_examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc new file mode 100644 index 000000000..2ec43a9b9 --- /dev/null +++ b/docs/doc_examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + ids: ["VuaCfGcBCdbkQm-e5aOx"], + owner: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc b/docs/doc_examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc new file mode 100644 index 000000000..cbb851c0f --- /dev/null +++ b/docs/doc_examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.tasks({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b2440b492149b705ef107137fdccb0c2.asciidoc b/docs/doc_examples/b2440b492149b705ef107137fdccb0c2.asciidoc new file mode 100644 index 000000000..86f16baba --- /dev/null +++ b/docs/doc_examples/b2440b492149b705ef107137fdccb0c2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.followInfo({ + index: "<index>", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b24a374c0ad264abbcacb5686f5ed61c.asciidoc b/docs/doc_examples/b24a374c0ad264abbcacb5686f5ed61c.asciidoc new file mode 100644 index 000000000..4d4945469 --- /dev/null +++ b/docs/doc_examples/b24a374c0ad264abbcacb5686f5ed61c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "text_payloads", + id: 1, + fields: ["text"], + payloads: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b25256ed615cd837461b0bfa590526b7.asciidoc b/docs/doc_examples/b25256ed615cd837461b0bfa590526b7.asciidoc new file mode 100644 index 000000000..9def5bea5 --- /dev/null +++ b/docs/doc_examples/b25256ed615cd837461b0bfa590526b7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseAutoFollowPattern({ + name: "my_auto_follow_pattern", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b2652b1763a5fd31e95c983869b433bd.asciidoc b/docs/doc_examples/b2652b1763a5fd31e95c983869b433bd.asciidoc new file mode 100644 index 000000000..5e97578d6 --- /dev/null +++ b/docs/doc_examples/b2652b1763a5fd31e95c983869b433bd.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source,
js] +---- +const response = await client.index({ + index: "metrics_index", + id: 1, + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 2, + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "metrics_index", + size: 0, + aggs: { + avg_latency: { + avg: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b2b26f8568c5dba7649e79f09b859272.asciidoc b/docs/doc_examples/b2b26f8568c5dba7649e79f09b859272.asciidoc new file mode 100644 index 000000000..461b239ec --- /dev/null +++ b/docs/doc_examples/b2b26f8568c5dba7649e79f09b859272.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "saml-service-user", + password: "<password>", + roles: ["saml-service-role"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b2dec193082462c775169db438308bc3.asciidoc b/docs/doc_examples/b2dec193082462c775169db438308bc3.asciidoc new file mode 100644 index 000000000..99396dabc --- /dev/null +++ b/docs/doc_examples/b2dec193082462c775169db438308bc3.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-replication", + cluster: ["read_ccr"], + indices: [ + { + names: ["leader-index-name"], + privileges: ["monitor", "read"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b2e1e802fc3c5fbeb4190af7d598c23e.asciidoc b/docs/doc_examples/b2e1e802fc3c5fbeb4190af7d598c23e.asciidoc new file mode 100644 index 000000000..3b6862fdc --- /dev/null +++ b/docs/doc_examples/b2e1e802fc3c5fbeb4190af7d598c23e.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + document: { + "@timestamp": "2099-11-15T13:12:00", + message: "GET /search HTTP/1.1 200 1070000", + user: { + id: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b2e20bca1846d7d584626b12eae9f6dc.asciidoc b/docs/doc_examples/b2e20bca1846d7d584626b12eae9f6dc.asciidoc new file mode 100644 index 000000000..c48370b26 --- /dev/null +++ b/docs/doc_examples/b2e20bca1846d7d584626b12eae9f6dc.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "name,node.role,disk.used_percent,disk.used,disk.avail,disk.total", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b2e4f3257c0e0aa3311f7270034bbc42.asciidoc b/docs/doc_examples/b2e4f3257c0e0aa3311f7270034bbc42.asciidoc new file mode 100644 index 000000000..afabda5ab --- /dev/null +++ b/docs/doc_examples/b2e4f3257c0e0aa3311f7270034bbc42.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index", + settings: { + "index.routing.allocation.require.data": null, + "index.routing.allocation.include._tier_preference": "data_hot", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3479ee4586c15020549afae58d94d65.asciidoc b/docs/doc_examples/b3479ee4586c15020549afae58d94d65.asciidoc new file mode 100644 index 000000000..68cc5ab9c --- /dev/null +++ b/docs/doc_examples/b3479ee4586c15020549afae58d94d65.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + point: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + point: [ + { + lat: -90, + lon: -80, + }, + { + lat: 10, + lon: 30, + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b3623b8c7f3e7650f52b6fb8b050f583.asciidoc b/docs/doc_examples/b3623b8c7f3e7650f52b6fb8b050f583.asciidoc new file mode 100644 index 000000000..2f0e298e1 --- /dev/null +++ b/docs/doc_examples/b3623b8c7f3e7650f52b6fb8b050f583.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.features.getFeatures(); +console.log(response); +---- diff --git a/docs/doc_examples/b3685560cb328f179d96ffe7c2668f72.asciidoc b/docs/doc_examples/b3685560cb328f179d96ffe7c2668f72.asciidoc new file mode 100644 index 000000000..1e269e68b --- /dev/null +++ b/docs/doc_examples/b3685560cb328f179d96ffe7c2668f72.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: + "if (values.length > 5*2) {MovingFunctions.holtWinters(values, 0.3, 0.1, 0.1, 5, false)}", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc b/docs/doc_examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc new file mode 100644 index 000000000..8d51c8bb6 --- /dev/null +++ b/docs/doc_examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_internal/desired_balance", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b37919cc438b47477343833b4e522408.asciidoc b/docs/doc_examples/b37919cc438b47477343833b4e522408.asciidoc new file mode 100644 index 000000000..111b4f7a6 --- /dev/null +++ b/docs/doc_examples/b37919cc438b47477343833b4e522408.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "imdb", + doc: { + plot: "When wealthy industrialist Tony Stark is forced to build an armored suit after a life-threatening incident, he ultimately decides to use its technology to fight against evil.", + }, + term_statistics: true, + field_statistics: true, + positions: false, + offsets: false, + filter: { + max_num_terms: 3, + min_term_freq: 1, + min_doc_freq: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3a1c4220617ded67ed43fff2051d324.asciidoc b/docs/doc_examples/b3a1c4220617ded67ed43fff2051d324.asciidoc new file mode 100644 index 000000000..b0861175a --- /dev/null +++ b/docs/doc_examples/b3a1c4220617ded67ed43fff2051d324.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + tags: { + type: "keyword", + eager_global_ordinals: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc b/docs/doc_examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc new file mode 100644 index 000000000..89cd2af13 --- /dev/null +++ b/docs/doc_examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: { + source: "ctx._source.counter += params.count", + lang: "painless", + params: { + count: 4, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc b/docs/doc_examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc new file mode 100644 index 000000000..98b1807e4 --- /dev/null +++ b/docs/doc_examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + long: { + type: "long", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + long: [0, 0, -123466, 87612], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b3ed567d2c0915a280b6b15f7a37539b.asciidoc b/docs/doc_examples/b3ed567d2c0915a280b6b15f7a37539b.asciidoc new file mode 100644 index 000000000..184318927 --- /dev/null +++ b/docs/doc_examples/b3ed567d2c0915a280b6b15f7a37539b.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + percentiles_monthly_sales: { + percentiles_bucket: { + buckets_path: "sales_per_month>sales", + percents: [25, 50, 75], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc 
b/docs/doc_examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc new file mode 100644 index 000000000..15dcc7ae9 --- /dev/null +++ b/docs/doc_examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + binary: { + type: "binary", + doc_values: true, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + binary: ["IAA=", "EAA="], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b3fffd96fdb118cd059b5f1d67d928de.asciidoc b/docs/doc_examples/b3fffd96fdb118cd059b5f1d67d928de.asciidoc new file mode 100644 index 000000000..02f50769a --- /dev/null +++ b/docs/doc_examples/b3fffd96fdb118cd059b5f1d67d928de.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "MultiPoint", + coordinates: [ + [102, 2], + [103, 2], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b42e7d627cd79e4c5e7a4a3cd8b19ce0.asciidoc b/docs/doc_examples/b42e7d627cd79e4c5e7a4a3cd8b19ce0.asciidoc new file mode 100644 index 000000000..2709e0881 --- /dev/null +++ b/docs/doc_examples/b42e7d627cd79e4c5e7a4a3cd8b19ce0.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "one-pipeline-to-rule-them-all", + processors: [ + { + pipeline: { + description: + "If 'service.name' is 'apache_httpd', use 'httpd_pipeline'", + if: "ctx.service?.name == 'apache_httpd'", + name: "httpd_pipeline", + }, + }, + { + pipeline: { + description: "If 'service.name' is 'syslog', use 'syslog_pipeline'", + if: "ctx.service?.name == 'syslog'", + name: "syslog_pipeline", + }, + }, + { + fail: { + description: + "If 'service.name' is not 'apache_httpd' or 'syslog', return a failure message", + if: "ctx.service?.name != 'apache_httpd' && ctx.service?.name != 'syslog'", + message: + "This pipeline requires service.name to be either `syslog` or `apache_httpd`", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b430122345d560bbd2a77826f5c475f7.asciidoc b/docs/doc_examples/b430122345d560bbd2a77826f5c475f7.asciidoc new file mode 100644 index 000000000..3d767067b --- /dev/null +++ b/docs/doc_examples/b430122345d560bbd2a77826f5c475f7.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + ip_fields: { + match: ["ip_*", "*_ip"], + unmatch: ["one*", "*two"], + mapping: { + type: "ip", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + one_ip: "will not match", + ip_two: "will not match", + three_ip: "12.12.12.12", + ip_four: "13.13.13.13", + }, +}); +console.log(response1); +---- diff --git
a/docs/doc_examples/b4392116f2cc57ce8064ccbad30318d5.asciidoc b/docs/doc_examples/b4392116f2cc57ce8064ccbad30318d5.asciidoc deleted file mode 100644 index f4b8f3ffc..000000000 --- a/docs/doc_examples/b4392116f2cc57ce8064ccbad30318d5.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test1', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc b/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc new file mode 100644 index 000000000..0a14b2f32 --- /dev/null +++ b/docs/doc_examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.completion({ + inference_id: "openai_chat_completions", + input: "What is Elastic?", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b45c60f908b329835ab40609423f378e.asciidoc b/docs/doc_examples/b45c60f908b329835ab40609423f378e.asciidoc new file mode 100644 index 000000000..d97a8f4a6 --- /dev/null +++ b/docs/doc_examples/b45c60f908b329835ab40609423f378e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + h: "node.role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4693f2aa9fa65db04ab2499355c54fc.asciidoc b/docs/doc_examples/b4693f2aa9fa65db04ab2499355c54fc.asciidoc new file mode 100644 index 000000000..aef48d3ba --- /dev/null +++ b/docs/doc_examples/b4693f2aa9fa65db04ab2499355c54fc.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cohere-embeddings", + knn: { + field: "content_embedding", + query_vector_builder: { + text_embedding: { + model_id: "cohere_embeddings", + model_text: "Muscles in human body", + }, + }, + k: 10, + num_candidates: 100, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b47945c7db8868dd36ba079b742f2a90.asciidoc b/docs/doc_examples/b47945c7db8868dd36ba079b742f2a90.asciidoc new file mode 100644 index 000000000..5ab2dad91 --- /dev/null +++ b/docs/doc_examples/b47945c7db8868dd36ba079b742f2a90.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my-app", + params: { + default_field: "author", + query_string: "Jane", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4946ecc9101b97102a1c5bcb19e5607.asciidoc b/docs/doc_examples/b4946ecc9101b97102a1c5bcb19e5607.asciidoc new file mode 100644 index 000000000..2bda8fc41 --- /dev/null +++ b/docs/doc_examples/b4946ecc9101b97102a1c5bcb19e5607.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + 
source: + '{ "query": { "bool": { "filter": [ {{#year_scope}} { "range": { "@timestamp": { "gte": "now-1y/d", "lt": "now/d" } } }, {{/year_scope}} { "term": { "user.id": "{{user_id}}" }}]}}}', + params: { + year_scope: true, + user_id: "kimchy", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4a0d0ed512dffc10ee53bca2feca49b.asciidoc b/docs/doc_examples/b4a0d0ed512dffc10ee53bca2feca49b.asciidoc deleted file mode 100644 index 86335844e..000000000 --- a/docs/doc_examples/b4a0d0ed512dffc10ee53bca2feca49b.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - query: { - match_all: {} - }, - boost: '5', - functions: [ - { - filter: { - match: { - test: 'bar' - } - }, - random_score: {}, - weight: 23 - }, - { - filter: { - match: { - test: 'cat' - } - }, - weight: 42 - } - ], - max_boost: 42, - score_mode: 'max', - boost_mode: 'multiply', - min_score: 42 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b4aec2a1d353852507c091bdb629b765.asciidoc b/docs/doc_examples/b4aec2a1d353852507c091bdb629b765.asciidoc new file mode 100644 index 000000000..0840b621e --- /dev/null +++ b/docs/doc_examples/b4aec2a1d353852507c091bdb629b765.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putFilter({ + filter_id: "safe_domains", + description: "A list of safe domains", + items: ["*.google.com", "wikipedia.org"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4d1fc887e40885cdf6ac2d01487cb76.asciidoc b/docs/doc_examples/b4d1fc887e40885cdf6ac2d01487cb76.asciidoc new file mode 100644 index 000000000..77651c802 --- /dev/null +++ b/docs/doc_examples/b4d1fc887e40885cdf6ac2d01487cb76.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_multi: { + match: { + prefix: { + "user.id": { + value: "ki", + boost: 1.08, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4d9d5017d42f27281e734e969949623.asciidoc b/docs/doc_examples/b4d9d5017d42f27281e734e969949623.asciidoc new file mode 100644 index 000000000..6e8f104ef --- /dev/null +++ b/docs/doc_examples/b4d9d5017d42f27281e734e969949623.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.getRepository({ + name: "my-repo", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4da132cb934c33d61e2b60988c6d4a3.asciidoc b/docs/doc_examples/b4da132cb934c33d61e2b60988c6d4a3.asciidoc new file mode 100644 index 000000000..6e6b62677 --- /dev/null +++ b/docs/doc_examples/b4da132cb934c33d61e2b60988c6d4a3.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + the_sum: { + sum: { + field: "lemmings", + 
}, + }, + thirtieth_difference: { + serial_diff: { + buckets_path: "the_sum", + lag: 30, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b4f3165e873f551fbaa03945877eb370.asciidoc b/docs/doc_examples/b4f3165e873f551fbaa03945877eb370.asciidoc new file mode 100644 index 000000000..604cc5327 --- /dev/null +++ b/docs/doc_examples/b4f3165e873f551fbaa03945877eb370.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_date_formats: ["yyyy/MM", "MM/dd/yyyy"], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + create_date: "09/25/2015", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b4f4c9ad3301c97fb3c38d108a3bc453.asciidoc b/docs/doc_examples/b4f4c9ad3301c97fb3c38d108a3bc453.asciidoc new file mode 100644 index 000000000..60c5ddca6 --- /dev/null +++ b/docs/doc_examples/b4f4c9ad3301c97fb3c38d108a3bc453.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_one: { + seeds: ["127.0.0.1:{remote-interface-default-port}"], + }, + cluster_two: { + mode: "sniff", + seeds: ["127.0.0.1:{remote-interface-default-port-plus1}"], + "transport.compress": true, + skip_unavailable: true, + }, + cluster_three: { + mode: "proxy", + proxy_address: "127.0.0.1:{remote-interface-default-port-plus2}", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b504119238b44cddd3b5944da20a498d.asciidoc b/docs/doc_examples/b504119238b44cddd3b5944da20a498d.asciidoc new file mode 100644 index 000000000..858440978 --- /dev/null +++ b/docs/doc_examples/b504119238b44cddd3b5944da20a498d.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "POLYGON ((1000.0 -1001.0, 1001.0 -1001.0, 1001.0 -1000.0, 1000.0 -1000.0, 1000.0 -1001.0))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b515427f8685ca7d79176def672d19fa.asciidoc b/docs/doc_examples/b515427f8685ca7d79176def672d19fa.asciidoc new file mode 100644 index 000000000..6d63883b0 --- /dev/null +++ b/docs/doc_examples/b515427f8685ca7d79176def672d19fa.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh(); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + size: 0, + q: "extra:test", + filter_path: "hits.total", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b52951b78cd5fb2f9353d1c7e6d37070.asciidoc b/docs/doc_examples/b52951b78cd5fb2f9353d1c7e6d37070.asciidoc new file mode 100644 index 000000000..869f73861 --- /dev/null +++ b/docs/doc_examples/b52951b78cd5fb2f9353d1c7e6d37070.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] 
+---- +const response = await client.search({ + query: { + wildcard: { + "user.id": { + value: "ki*y", + boost: 1, + rewrite: "constant_score_blended", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc b/docs/doc_examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc new file mode 100644 index 000000000..052d3a53f --- /dev/null +++ b/docs/doc_examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "logs", + mappings: { + _source: { + includes: ["*.count", "meta.*"], + excludes: ["meta.description", "meta.other.*"], + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "logs", + id: 1, + document: { + requests: { + count: 10, + foo: "bar", + }, + meta: { + name: "Some metric", + description: "Some metric description", + other: { + foo: "one", + baz: "two", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "logs", + query: { + match: { + "meta.other.foo": "one", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b573e893de0d5f92d67f4f5eb7f0c353.asciidoc b/docs/doc_examples/b573e893de0d5f92d67f4f5eb7f0c353.asciidoc new file mode 100644 index 000000000..64bf78362 --- /dev/null +++ b/docs/doc_examples/b573e893de0d5f92d67f4f5eb7f0c353.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + stats_monthly_sales: { + stats_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc b/docs/doc_examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc new file mode 100644 index 000000000..17ee6fcc0 --- /dev/null +++ b/docs/doc_examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_1", + template: { + settings: { + number_of_shards: 1, + }, + aliases: { + alias1: {}, + alias2: { + filter: { + term: { + "user.id": "kimchy", + }, + }, + routing: "shard-1", + }, + "{index}-alias": {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b58b17975bbce307b2ccce5051a449e8.asciidoc b/docs/doc_examples/b58b17975bbce307b2ccce5051a449e8.asciidoc new file mode 100644 index 000000000..6276393a4 --- /dev/null +++ b/docs/doc_examples/b58b17975bbce307b2ccce5051a449e8.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + filter_path: "hits.total", + query: { + range: { + "http.response.bytes": { + lt: 2000000, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc b/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc new file mode 100644 index 000000000..f71aebf61 --- /dev/null +++ b/docs/doc_examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + avg_order_value: { + avg: { + field: "taxful_total_price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc b/docs/doc_examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc new file mode 100644 index 000000000..1e2a0ed25 --- /dev/null +++ b/docs/doc_examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.putJob({ + id: "sensor2", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "*/30 * * * * ?", + page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "1h", + delay: "7d", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); + +const response1 = await client.rollup.getJobs({ + id: "_all", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b5e5cd4eccc40d7c5f2a1fcb654bd4a4.asciidoc b/docs/doc_examples/b5e5cd4eccc40d7c5f2a1fcb654bd4a4.asciidoc new file mode 100644 index 000000000..b346dff5b --- /dev/null +++ b/docs/doc_examples/b5e5cd4eccc40d7c5f2a1fcb654bd4a4.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stackoverflow", + size: 0, + query: { + query_string: { + query: "tags:elasticsearch", + }, + }, + aggs: { + my_unbiased_sample: { + diversified_sampler: { + shard_size: 200, + field: "author", + }, + aggs: { + keywords: { + significant_terms: { + field: "tags", + exclude: ["elasticsearch"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b5f95bc097a201b29c7200fc8d3d31c1.asciidoc b/docs/doc_examples/b5f95bc097a201b29c7200fc8d3d31c1.asciidoc deleted file mode 100644 index be1a10ea3..000000000 --- a/docs/doc_examples/b5f95bc097a201b29c7200fc8d3d31c1.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.putTemplate({ - name: 'template_1', - body: { - index_patterns: [ - '*' - ], - order: 0, - settings: { - number_of_shards: 1 - }, - mappings: { - _source: { - enabled: false - } - } - } -}) -console.log(response0) - -const response1 = await client.indices.putTemplate({ - name: 'template_2', - body: { - index_patterns: [ - 'te*' - ], - order: 1, - settings: { - number_of_shards: 1 - }, - mappings: { - _source: { - enabled: true - } - } - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/b601bc78fb69e15a42e0783219ddc38d.asciidoc b/docs/doc_examples/b601bc78fb69e15a42e0783219ddc38d.asciidoc new file mode 
100644 index 000000000..4f33f0da1 --- /dev/null +++ b/docs/doc_examples/b601bc78fb69e15a42e0783219ddc38d.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + max_monthly_sales: { + max_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc b/docs/doc_examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc new file mode 100644 index 000000000..34e45f1b9 --- /dev/null +++ b/docs/doc_examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + price: { + missing: "_last", + }, + }, + ], + query: { + term: { + product: "chocolate", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc b/docs/doc_examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc new file mode 100644 index 000000000..c872c534b --- /dev/null +++ b/docs/doc_examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "publications", + fields: "a*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b620ef4400d2f660fe2c67835938442c.asciidoc b/docs/doc_examples/b620ef4400d2f660fe2c67835938442c.asciidoc new file mode 100644 index 000000000..d756cff94 --- /dev/null +++ b/docs/doc_examples/b620ef4400d2f660fe2c67835938442c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.deleteAutoscalingPolicy({ + name: "my_autoscaling_policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc new file mode 100644 index 000000000..7ffe922db --- /dev/null +++ b/docs/doc_examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc @@ -0,0 +1,216 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + refresh: "true", + operations: [ + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] 
loaded module [x-pack-core]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", + }, + { + index: { + _index: "test-logs", + }, + }, + { + message: + "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ...", + }, + ], +}); +console.log(response); + +const response1 = await client.textStructure.findFieldStructure({ + index: "test-logs", + field: "message", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b638e11d6a8a084290f8934d224abd52.asciidoc b/docs/doc_examples/b638e11d6a8a084290f8934d224abd52.asciidoc new file mode 100644 index 000000000..9e193f94e --- /dev/null +++ 
b/docs/doc_examples/b638e11d6a8a084290f8934d224abd52.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.max_shards_per_node.frozen": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b63ce79ce4fa1bb9b99a789f4dcfef4e.asciidoc b/docs/doc_examples/b63ce79ce4fa1bb9b99a789f4dcfef4e.asciidoc new file mode 100644 index 000000000..d787e9a7f --- /dev/null +++ b/docs/doc_examples/b63ce79ce4fa1bb9b99a789f4dcfef4e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "test", + settings: { + top_metrics_max_size: 100, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b65dbb51ddd496189c65a9326a53480c.asciidoc b/docs/doc_examples/b65dbb51ddd496189c65a9326a53480c.asciidoc new file mode 100644 index 000000000..cd9fd0281 --- /dev/null +++ b/docs/doc_examples/b65dbb51ddd496189c65a9326a53480c.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_read_only_url_repository", + repository: { + type: "url", + settings: { + url: "file:/mount/backups/my_fs_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc b/docs/doc_examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc new file mode 100644 index 000000000..47fff6913 --- /dev/null +++ b/docs/doc_examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "metadata,routing_table", + index: "foo,bar", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b67fa8c560dd10a8e6f226048cd21562.asciidoc b/docs/doc_examples/b67fa8c560dd10a8e6f226048cd21562.asciidoc new file mode 100644 index 000000000..3bd3fc327 --- /dev/null +++ b/docs/doc_examples/b67fa8c560dd10a8e6f226048cd21562.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: '{ "query": { "bool": { "must": {{#toJson}}clauses{{/toJson}} }}}', + params: { + clauses: [ + { + term: { + "user.id": "kimchy", + }, + }, + { + term: { + "url.domain": "example.com", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b68c85fe1b0d2f264dc0d1cbf530f319.asciidoc b/docs/doc_examples/b68c85fe1b0d2f264dc0d1cbf530f319.asciidoc deleted file mode 100644 index f428e4fb7..000000000 --- a/docs/doc_examples/b68c85fe1b0d2f264dc0d1cbf530f319.asciidoc +++ /dev/null @@ -1,30 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - query: { - match: { - message: 'elasticsearch' - } - }, - script_score: { - script: { - params: { - a: 5, - b: 1.2 - }, - 
source: "params.a / Math.pow(params.b, doc['likes'].value)" - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b68ed7037042719945a2452d23e64c78.asciidoc b/docs/doc_examples/b68ed7037042719945a2452d23e64c78.asciidoc new file mode 100644 index 000000000..8ec894868 --- /dev/null +++ b/docs/doc_examples/b68ed7037042719945a2452d23e64c78.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 3, + refresh: "true", + document: { + query: { + match: { + message: "brown fox", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b691d41f84b5b46e9051b51db22a46af.asciidoc b/docs/doc_examples/b691d41f84b5b46e9051b51db22a46af.asciidoc new file mode 100644 index 000000000..e026fbdb7 --- /dev/null +++ b/docs/doc_examples/b691d41f84b5b46e9051b51db22a46af.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + include: ["swing", "rock"], + exclude: ["jazz"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc b/docs/doc_examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc new file mode 100644 index 000000000..363f0407f --- /dev/null +++ b/docs/doc_examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + filter_path: "persistent.cluster.remote", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc b/docs/doc_examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc new file mode 100644 index 000000000..c0ceba90e --- /dev/null +++ b/docs/doc_examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + bool: { + should: [ + { + sparse_vector: { + field: "content_embedding", + inference_id: "my-elser-endpoint", + query: "How to avoid muscle soreness after running?", + boost: 1, + }, + }, + { + query_string: { + query: "toxins", + boost: 4, + }, + }, + ], + }, + }, + min_score: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6c872d04eabb39d1947cde6b29d4ae1.asciidoc b/docs/doc_examples/b6c872d04eabb39d1947cde6b29d4ae1.asciidoc new file mode 100644 index 000000000..0d1ab73b0 --- /dev/null +++ b/docs/doc_examples/b6c872d04eabb39d1947cde6b29d4ae1.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + tags: { + terms: { + field: "tags", + min_doc_count: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc b/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc new file mode 100644 index 000000000..446bba938 --- /dev/null +++ 
b/docs/doc_examples/b6d278737d27973e498ac61cda9e5126.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_orders: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + format: "yyyy-MM-dd", + min_doc_count: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6e29a0e14b611d4aaafb3051220ea56.asciidoc b/docs/doc_examples/b6e29a0e14b611d4aaafb3051220ea56.asciidoc new file mode 100644 index 000000000..e7faa2968 --- /dev/null +++ b/docs/doc_examples/b6e29a0e14b611d4aaafb3051220ea56.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + title: { + type: "text", + analyzer: "whitespace", + search_analyzer: "simple", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6e385760e036e36827f719b540d9c11.asciidoc b/docs/doc_examples/b6e385760e036e36827f719b540d9c11.asciidoc new file mode 100644 index 000000000..55fd2549a --- /dev/null +++ b/docs/doc_examples/b6e385760e036e36827f719b540d9c11.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-dfs-index", + search_type: "dfs_query_then_fetch", + pretty: "true", + size: 0, + profile: true, + query: { + term: { + "my-keyword": { + value: "a", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc b/docs/doc_examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc new file mode 100644 index 000000000..bddd8cdb6 --- /dev/null +++ b/docs/doc_examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-byte-quantized-index", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 4, + index: true, + index_options: { + type: "int4_hnsw", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b717a583b5165e5c6caafc42fdfd9086.asciidoc b/docs/doc_examples/b717a583b5165e5c6caafc42fdfd9086.asciidoc new file mode 100644 index 000000000..7f2287489 --- /dev/null +++ b/docs/doc_examples/b717a583b5165e5c6caafc42fdfd9086.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "places", + mappings: { + properties: { + geometry: { + type: "shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "places", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + name: "NEMO Science Museum", + geometry: "POINT(491.2350 5237.4081)", + }, + { + index: { + _id: 2, + }, + }, + { + name: "Sportpark De Weeren", + geometry: { + type: "Polygon", + coordinates: [ + [ + [496.5305328369141, 5239.347642069457], + [496.6979026794433, 
5239.172175893484], + [496.9425201416015, 5239.238958618537], + [496.7944622039794, 5239.420969150824], + [496.5305328369141, 5239.347642069457], + ], + ], + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "places", + size: 0, + aggs: { + viewport: { + cartesian_bounds: { + field: "geometry", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b724f547c5d67e95bbc0a9920e47033c.asciidoc b/docs/doc_examples/b724f547c5d67e95bbc0a9920e47033c.asciidoc new file mode 100644 index 000000000..d283de54e --- /dev/null +++ b/docs/doc_examples/b724f547c5d67e95bbc0a9920e47033c.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "file-path-test", + query: { + term: { + "file_path.tree": "/User/alice/photos/2017/05/16", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc b/docs/doc_examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc new file mode 100644 index 000000000..9138df541 --- /dev/null +++ b/docs/doc_examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.allocation({ + v: "true", + h: "node,shards,disk.*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7a4f5b9a93eff44268a1ee38ee1c6d3.asciidoc b/docs/doc_examples/b7a4f5b9a93eff44268a1ee38ee1c6d3.asciidoc new file mode 100644 index 000000000..e911c937e --- /dev/null +++ b/docs/doc_examples/b7a4f5b9a93eff44268a1ee38ee1c6d3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "archive", + }, + dest: { + index: "my-data-stream", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7a9f60b3646efe3834ca8381f8aa560.asciidoc b/docs/doc_examples/b7a9f60b3646efe3834ca8381f8aa560.asciidoc new file mode 100644 index 000000000..4674047c2 --- /dev/null +++ b/docs/doc_examples/b7a9f60b3646efe3834ca8381f8aa560.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "logger.org.elasticsearch.discovery": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7ad394975863a8f5ee29627c3ab738b.asciidoc b/docs/doc_examples/b7ad394975863a8f5ee29627c3ab738b.asciidoc new file mode 100644 index 000000000..9f14e1098 --- /dev/null +++ b/docs/doc_examples/b7ad394975863a8f5ee29627c3ab738b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7bb5503e64bd869b2ac1c46c434a079.asciidoc b/docs/doc_examples/b7bb5503e64bd869b2ac1c46c434a079.asciidoc new file mode 100644 index 000000000..b560ec837 --- 
/dev/null +++ b/docs/doc_examples/b7bb5503e64bd869b2ac1c46c434a079.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + histo: { + histogram: { + field: "price", + interval: 5, + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc b/docs/doc_examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc new file mode 100644 index 000000000..d82a47006 --- /dev/null +++ b/docs/doc_examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + document: { + message: "A new bonsai tree in the office", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc b/docs/doc_examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc new file mode 100644 index 000000000..0bb694919 --- /dev/null +++ b/docs/doc_examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.health({ + filter_path: "status,*_shards", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc b/docs/doc_examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc new file mode 100644 index 000000000..a89aac295 --- /dev/null +++ b/docs/doc_examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_query_rules/my-ruleset/_rule/my-rule1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc b/docs/doc_examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc new file mode 100644 index 000000000..55e88724b --- /dev/null +++ b/docs/doc_examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping7", + roles: ["ldap-example-user"], + enabled: true, + rules: { + all: [ + { + field: { + dn: "*,ou=subtree,dc=example,dc=com", + }, + }, + { + field: { + "realm.name": "ldap1", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b81a7b5f5ef19553f9cd49196f31018c.asciidoc b/docs/doc_examples/b81a7b5f5ef19553f9cd49196f31018c.asciidoc new file mode 100644 index 000000000..e7be5898e --- /dev/null +++ b/docs/doc_examples/b81a7b5f5ef19553f9cd49196f31018c.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "items", + mappings: { + properties: { + name: { + type: "keyword", + }, + production_date: { + type: "date", + }, + location: { + type: 
"geo_point", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b82b156c7b9d1d78054577a6947a6cdd.asciidoc b/docs/doc_examples/b82b156c7b9d1d78054577a6947a6cdd.asciidoc new file mode 100644 index 000000000..8eb0ebc1a --- /dev/null +++ b/docs/doc_examples/b82b156c7b9d1d78054577a6947a6cdd.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "geocells", + id: 1, + pipeline: "geotile2shape", + document: { + geocell: "4/8/5", + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "geocells", + id: 1, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b839f79a5d58506baed5714f1876ab55.asciidoc b/docs/doc_examples/b839f79a5d58506baed5714f1876ab55.asciidoc new file mode 100644 index 000000000..6a88cfba7 --- /dev/null +++ b/docs/doc_examples/b839f79a5d58506baed5714f1876ab55.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: '\n process where process.name == "regsvr32.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/b8400dbe39215705060500f0e569f452.asciidoc b/docs/doc_examples/b8400dbe39215705060500f0e569f452.asciidoc new file mode 100644 index 000000000..efd531967 --- /dev/null +++ b/docs/doc_examples/b8400dbe39215705060500f0e569f452.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.get({ + connector_id: "my-connector-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b84932030e60a2cd58884b9dc6d3147f.asciidoc b/docs/doc_examples/b84932030e60a2cd58884b9dc6d3147f.asciidoc new file mode 100644 index 000000000..51159b730 --- /dev/null +++ b/docs/doc_examples/b84932030e60a2cd58884b9dc6d3147f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.search({ + name: "my_search_application", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b85716ba42a57096452665c38995da7d.asciidoc b/docs/doc_examples/b85716ba42a57096452665c38995da7d.asciidoc new file mode 100644 index 000000000..71f3584f3 --- /dev/null +++ b/docs/doc_examples/b85716ba42a57096452665c38995da7d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.previewDataFrameAnalytics({ + config: { + source: { + index: "houses_sold_last_10_yrs", + }, + analysis: { + regression: { + dependent_variable: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b857abedc64e367def172bd07075e5c7.asciidoc b/docs/doc_examples/b857abedc64e367def172bd07075e5c7.asciidoc new file mode 100644 index 000000000..b4250ac7a --- /dev/null +++ b/docs/doc_examples/b857abedc64e367def172bd07075e5c7.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = 
await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_fingerprint_analyzer: { + type: "fingerprint", + stopwords: "_english_", + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_fingerprint_analyzer", + text: "Yes yes, Gödel said this sentence is consistent and.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/b87438263ccd68624b1d69d8750f9432.asciidoc b/docs/doc_examples/b87438263ccd68624b1d69d8750f9432.asciidoc new file mode 100644 index 000000000..ded6c67e6 --- /dev/null +++ b/docs/doc_examples/b87438263ccd68624b1d69d8750f9432.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + status_code: { + type: "long", + }, + session_id: { + type: "long", + index: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b87bc8a521995051c7e7395f9c047e1c.asciidoc b/docs/doc_examples/b87bc8a521995051c7e7395f9c047e1c.asciidoc new file mode 100644 index 000000000..51f2cdbe2 --- /dev/null +++ b/docs/doc_examples/b87bc8a521995051c7e7395f9c047e1c.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + number_one: { + type: "integer", + ignore_malformed: true, + }, + number_two: { + type: "integer", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Some text value", + number_one: "foo", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + text: "Some text value", + number_two: "foo", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b88a2d96da1401d548a4540cca223d27.asciidoc b/docs/doc_examples/b88a2d96da1401d548a4540cca223d27.asciidoc new file mode 100644 index 000000000..dd71f035a --- /dev/null +++ b/docs/doc_examples/b88a2d96da1401d548a4540cca223d27.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchMvt({ + index: "museums", + field: "location", + zoom: 13, + x: 4207, + y: 2692, + grid_agg: "geotile", + grid_precision: 2, + fields: ["name", "price"], + query: { + term: { + included: true, + }, + }, + aggs: { + min_price: { + min: { + field: "price", + }, + }, + max_price: { + max: { + field: "price", + }, + }, + avg_price: { + avg: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b8c03bbd917d0cf5474a3e46ebdd7aad.asciidoc b/docs/doc_examples/b8c03bbd917d0cf5474a3e46ebdd7aad.asciidoc new file mode 100644 index 000000000..752be7fd7 --- /dev/null +++ b/docs/doc_examples/b8c03bbd917d0cf5474a3e46ebdd7aad.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["cjk_bigram"], + text: 
"東京都は、日本の首都であり", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b8cc74a92bac837bfd8ba6d5935350ed.asciidoc b/docs/doc_examples/b8cc74a92bac837bfd8ba6d5935350ed.asciidoc new file mode 100644 index 000000000..d818285db --- /dev/null +++ b/docs/doc_examples/b8cc74a92bac837bfd8ba6d5935350ed.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + enabled: false, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + user_id: "kimchy", + session_data: { + object: { + some_field: "some_value", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + fields: [ + "user_id", + { + field: "session_data.object.*", + include_unmapped: true, + }, + ], + _source: false, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b8dc3764c4467922474b2cdec74bb86b.asciidoc b/docs/doc_examples/b8dc3764c4467922474b2cdec74bb86b.asciidoc new file mode 100644 index 000000000..c04dca75e --- /dev/null +++ b/docs/doc_examples/b8dc3764c4467922474b2cdec74bb86b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.startTransform({ + transform_id: "last-log-from-clientip", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b8e6e320a19936f6edfc242ccb5cde43.asciidoc b/docs/doc_examples/b8e6e320a19936f6edfc242ccb5cde43.asciidoc new file mode 100644 index 000000000..fe98fe20a --- /dev/null +++ b/docs/doc_examples/b8e6e320a19936f6edfc242ccb5cde43.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + names: ["John Abraham", "Lincoln Smith"], + }, +}); +console.log(response); + +const response1 = await client.search({ + index: "my-index-000001", + query: { + match_phrase: { + names: { + query: "Abraham Lincoln", + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match_phrase: { + names: { + query: "Abraham Lincoln", + slop: 101, + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/b918d6b798da673a33e49b94f61dcdc0.asciidoc b/docs/doc_examples/b918d6b798da673a33e49b94f61dcdc0.asciidoc deleted file mode 100644 index 688866783..000000000 --- a/docs/doc_examples/b918d6b798da673a33e49b94f61dcdc0.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '1', - timeout: '5m', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b919f88e6f47a40d5793479440a90ba6.asciidoc b/docs/doc_examples/b919f88e6f47a40d5793479440a90ba6.asciidoc deleted file mode 100644 index a05989f32..000000000 --- a/docs/doc_examples/b919f88e6f47a40d5793479440a90ba6.asciidoc +++ /dev/null @@ -1,102 
+0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - user: { - type: 'nested' - } - } - } - } -}) -console.log(response0) - -const response1 = await client.index({ - index: 'my_index', - id: '1', - body: { - group: 'fans', - user: [ - { - first: 'John', - last: 'Smith' - }, - { - first: 'Alice', - last: 'White' - } - ] - } -}) -console.log(response1) - -const response2 = await client.search({ - index: 'my_index', - body: { - query: { - nested: { - path: 'user', - query: { - bool: { - must: [ - { - match: { - 'user.first': 'Alice' - } - }, - { - match: { - 'user.last': 'Smith' - } - } - ] - } - } - } - } - } -}) -console.log(response2) - -const response3 = await client.search({ - index: 'my_index', - body: { - query: { - nested: { - path: 'user', - query: { - bool: { - must: [ - { - match: { - 'user.first': 'Alice' - } - }, - { - match: { - 'user.last': 'White' - } - } - ] - } - }, - inner_hits: { - highlight: { - fields: { - 'user.first': {} - } - } - } - } - } - } -}) -console.log(response3) ----- - diff --git a/docs/doc_examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc b/docs/doc_examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc new file mode 100644 index 000000000..25f68fae7 --- /dev/null +++ b/docs/doc_examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["city.*"], + query: "this AND that OR thus", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc b/docs/doc_examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc new file mode 100644 index 000000000..3445735bc --- /dev/null +++ b/docs/doc_examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.clearScroll({ + scroll_id: + "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==,DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b968853454b4416f7baa3209eb335957.asciidoc b/docs/doc_examples/b968853454b4416f7baa3209eb335957.asciidoc new file mode 100644 index 000000000..fbd141a5c --- /dev/null +++ b/docs/doc_examples/b968853454b4416f7baa3209eb335957.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + cities: { + terms: { + field: "city.keyword", + }, + aggs: { + centroid: { + cartesian_centroid: { + field: "location", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b96f465abb658fe32889c3d183f159a3.asciidoc b/docs/doc_examples/b96f465abb658fe32889c3d183f159a3.asciidoc new file mode 100644 index 000000000..8d6be0bbe --- /dev/null +++ 
b/docs/doc_examples/b96f465abb658fe32889c3d183f159a3.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "limit_example", + settings: { + analysis: { + analyzer: { + standard_one_token_limit: { + tokenizer: "standard", + filter: ["limit"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b997885974522ef439d5e345924cc5ba.asciidoc b/docs/doc_examples/b997885974522ef439d5e345924cc5ba.asciidoc deleted file mode 100644 index b7d1582c0..000000000 --- a/docs/doc_examples/b997885974522ef439d5e345924cc5ba.asciidoc +++ /dev/null @@ -1,39 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.index({ - index: 'my_index', - id: '1', - refresh: true, - body: { - product: 'chocolate', - price: [ - 20, - 4 - ] - } -}) -console.log(response0) - -const response1 = await client.search({ - body: { - query: { - term: { - product: 'chocolate' - } - }, - sort: [ - { - price: { - order: 'asc', - mode: 'avg' - } - } - ] - } -}) -console.log(response1) ----- - diff --git a/docs/doc_examples/b9a8f39ab9b1ed18c6c1db61ac4e6a9e.asciidoc b/docs/doc_examples/b9a8f39ab9b1ed18c6c1db61ac4e6a9e.asciidoc new file mode 100644 index 000000000..b68c2dadb --- /dev/null +++ b/docs/doc_examples/b9a8f39ab9b1ed18c6c1db61ac4e6a9e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "_current", +}); +console.log(response); +---- diff --git a/docs/doc_examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc b/docs/doc_examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc new file mode 100644 index 000000000..61939766d --- /dev/null +++ b/docs/doc_examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + attributes: { + id: "foo", + }, + id: "bar", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/b9c5d7ca6ca9c6f747201f45337a4abf.asciidoc b/docs/doc_examples/b9c5d7ca6ca9c6f747201f45337a4abf.asciidoc deleted file mode 100644 index d4ad0cff8..000000000 --- a/docs/doc_examples/b9c5d7ca6ca9c6f747201f45337a4abf.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'twitter', - body: { - settings: { - number_of_shards: 3, - number_of_replicas: 2 - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/b9f716219359a6c973dafc50b348de33.asciidoc b/docs/doc_examples/b9f716219359a6c973dafc50b348de33.asciidoc new file mode 100644 index 000000000..4e8a2557e --- /dev/null +++ b/docs/doc_examples/b9f716219359a6c973dafc50b348de33.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: 
"my-index-000001", + mappings: { + _source: { + enabled: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba07330ed3291b3970f4eb01dacd8086.asciidoc b/docs/doc_examples/ba07330ed3291b3970f4eb01dacd8086.asciidoc new file mode 100644 index 000000000..f9c560d5f --- /dev/null +++ b/docs/doc_examples/ba07330ed3291b3970f4eb01dacd8086.asciidoc @@ -0,0 +1,105 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (2.336389 48.861111)", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggs: { + rings_around_amsterdam: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + ranges: [ + { + to: 100000, + }, + { + from: 100000, + to: 300000, + }, + { + from: 300000, + }, + ], + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ba0b4081c98f3387f76b77847c52ee9a.asciidoc b/docs/doc_examples/ba0b4081c98f3387f76b77847c52ee9a.asciidoc deleted file mode 100644 index b68015bb2..000000000 --- a/docs/doc_examples/ba0b4081c98f3387f76b77847c52ee9a.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.close({ - index: 'twitter' -}) -console.log(response0) - -const response1 = await client.indices.putSettings({ - index: 'twitter', - body: { - analysis: { - analyzer: { - content: { - type: 'custom', - tokenizer: 'whitespace' - } - } - } - } -}) -console.log(response1) - -const response2 = await client.indices.open({ - index: 'twitter' -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc b/docs/doc_examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc new file mode 100644 index 000000000..42c6d4763 --- /dev/null +++ b/docs/doc_examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveCluster({ + name: "not-present,clust*:my-index*,oldcluster:*", + ignore_unavailable: "false", + timeout: "5s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc b/docs/doc_examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc new file mode 100644 index 000000000..647beec9d --- /dev/null +++ 
b/docs/doc_examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.follow({ + index: ".ds-logs-mysql-default_copy-2022-01-01-000001", + remote_cluster: "remote_cluster", + leader_index: ".ds-logs-mysql-default-2022-01-01-000001", + data_stream_name: "logs-mysql-default_copy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc b/docs/doc_examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc new file mode 100644 index 000000000..e5f5724e2 --- /dev/null +++ b/docs/doc_examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.updateUserProfileData({ + uid: "u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", + labels: { + direction: "east", + }, + data: { + app1: { + theme: "default", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba3b9783aa188c6841e1926c5ab1472d.asciidoc b/docs/doc_examples/ba3b9783aa188c6841e1926c5ab1472d.asciidoc new file mode 100644 index 000000000..3fbe7f25e --- /dev/null +++ b/docs/doc_examples/ba3b9783aa188c6841e1926c5ab1472d.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1", "index2"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba5dc6fb9bbe1406714da5d641462a23.asciidoc b/docs/doc_examples/ba5dc6fb9bbe1406714da5d641462a23.asciidoc new file mode 100644 index 000000000..4287d506e --- /dev/null +++ b/docs/doc_examples/ba5dc6fb9bbe1406714da5d641462a23.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + strings_as_ip: { + match_mapping_type: "string", + match: "ip*", + runtime: { + type: "ip", + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc b/docs/doc_examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc new file mode 100644 index 000000000..3fd0c946f --- /dev/null +++ b/docs/doc_examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getTemplate({ + name: "temp*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba650046f9063f6c43d76f47e0f94403.asciidoc b/docs/doc_examples/ba650046f9063f6c43d76f47e0f94403.asciidoc new file mode 100644 index 000000000..ec556c8dc --- /dev/null +++ b/docs/doc_examples/ba650046f9063f6c43d76f47e0f94403.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + 
mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + date: { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + date: ["2015-01-01T12:10:30Z", "2014-01-01T12:10:30Z"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/ba66768ed04f7b87906badff40ff40ed.asciidoc b/docs/doc_examples/ba66768ed04f7b87906badff40ff40ed.asciidoc new file mode 100644 index 000000000..5e98e21e6 --- /dev/null +++ b/docs/doc_examples/ba66768ed04f7b87906badff40ff40ed.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50gb", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba8c3578613ae0bf890f6a05706ce776.asciidoc b/docs/doc_examples/ba8c3578613ae0bf890f6a05706ce776.asciidoc new file mode 100644 index 000000000..46ae56d49 --- /dev/null +++ b/docs/doc_examples/ba8c3578613ae0bf890f6a05706ce776.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + filter_path: "-hits.events._source", + query: '\n process where process.name == "regsvr32.exe"\n ', + fields: [ + "event.type", + "process.*", + { + field: "@timestamp", + format: "epoch_millis", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ba9a5f66a6148612de0ad2491fd6c90d.asciidoc b/docs/doc_examples/ba9a5f66a6148612de0ad2491fd6c90d.asciidoc new file mode 100644 index 000000000..c56554773 --- /dev/null +++ b/docs/doc_examples/ba9a5f66a6148612de0ad2491fd6c90d.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "classic", + max_token_length: 5, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc b/docs/doc_examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc new file mode 100644 index 000000000..6dc62aec5 --- /dev/null +++ b/docs/doc_examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.disableUserProfile({ + uid: "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bab4c3b22c1768fcc7153345e4096dfb.asciidoc b/docs/doc_examples/bab4c3b22c1768fcc7153345e4096dfb.asciidoc new file mode 100644 index 000000000..0cdc5da85 --- /dev/null +++ b/docs/doc_examples/bab4c3b22c1768fcc7153345e4096dfb.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use 
`node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["keyword_repeat", "stemmer", "remove_duplicates"], + text: "jumping dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb067c049331cc850a77b18bdfff81b5.asciidoc b/docs/doc_examples/bb067c049331cc850a77b18bdfff81b5.asciidoc new file mode 100644 index 000000000..a2845c63c --- /dev/null +++ b/docs/doc_examples/bb067c049331cc850a77b18bdfff81b5.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "lithuanian_example", + settings: { + analysis: { + filter: { + lithuanian_stop: { + type: "stop", + stopwords: "_lithuanian_", + }, + lithuanian_keywords: { + type: "keyword_marker", + keywords: ["pavyzdys"], + }, + lithuanian_stemmer: { + type: "stemmer", + language: "lithuanian", + }, + }, + analyzer: { + rebuilt_lithuanian: { + tokenizer: "standard", + filter: [ + "lowercase", + "lithuanian_stop", + "lithuanian_keywords", + "lithuanian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb143628fd04070683eeeadc9406d9cc.asciidoc b/docs/doc_examples/bb143628fd04070683eeeadc9406d9cc.asciidoc deleted file mode 100644 index 04d174d4f..000000000 --- a/docs/doc_examples/bb143628fd04070683eeeadc9406d9cc.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '1', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/bb28d1f7f3f09f5061d7f4351aee89fc.asciidoc b/docs/doc_examples/bb28d1f7f3f09f5061d7f4351aee89fc.asciidoc new file mode 100644 index 000000000..8fb900dc0 --- /dev/null +++ b/docs/doc_examples/bb28d1f7f3f09f5061d7f4351aee89fc.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role4", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["customer.*"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc b/docs/doc_examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc new file mode 100644 index 000000000..a8bb92321 --- /dev/null +++ b/docs/doc_examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.disableUser({ + username: "jacknich", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc b/docs/doc_examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc new file mode 100644 index 000000000..ac5f2bf5b --- /dev/null +++ b/docs/doc_examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = 
await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + highlight: { + fields: { + text: { + fragment_size: 150, + number_of_fragments: 3, + }, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc b/docs/doc_examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc new file mode 100644 index 000000000..d73057f5b --- /dev/null +++ b/docs/doc_examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc @@ -0,0 +1,88 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "cooking_blog", + refresh: "wait_for", + operations: [ + { + index: { + _id: "1", + }, + }, + { + title: "Perfect Pancakes: A Fluffy Breakfast Delight", + description: + "Learn the secrets to making the fluffiest pancakes, so amazing you won't believe your tastebuds. This recipe uses buttermilk and a special folding technique to create light, airy pancakes that are perfect for lazy Sunday mornings.", + author: "Maria Rodriguez", + date: "2023-05-01", + category: "Breakfast", + tags: ["pancakes", "breakfast", "easy recipes"], + rating: 4.8, + }, + { + index: { + _id: "2", + }, + }, + { + title: "Spicy Thai Green Curry: A Vegetarian Adventure", + description: + "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.", + author: "Liam Chen", + date: "2023-05-05", + category: "Main Course", + tags: ["thai", "vegetarian", "curry", "spicy"], + rating: 4.6, + }, + { + index: { + _id: "3", + }, + }, + { + title: "Classic Beef Stroganoff: A Creamy Comfort Food", + description: + "Indulge in this rich and creamy beef stroganoff. Tender strips of beef in a savory mushroom sauce, served over a bed of egg noodles. It's the ultimate comfort food for chilly evenings.", + author: "Emma Watson", + date: "2023-05-10", + category: "Main Course", + tags: ["beef", "pasta", "comfort food"], + rating: 4.7, + }, + { + index: { + _id: "4", + }, + }, + { + title: "Vegan Chocolate Avocado Mousse", + description: + "Discover the magic of avocado in this rich, vegan chocolate mousse. Creamy, indulgent, and secretly healthy, it's the perfect guilt-free dessert for chocolate lovers.", + author: "Alex Green", + date: "2023-05-15", + category: "Dessert", + tags: ["vegan", "chocolate", "avocado", "healthy dessert"], + rating: 4.5, + }, + { + index: { + _id: "5", + }, + }, + { + title: "Crispy Oven-Fried Chicken", + description: + "Get that perfect crunch without the deep fryer! This oven-fried chicken recipe delivers crispy, juicy results every time. 
A healthier take on the classic comfort food.", + author: "Maria Rodriguez", + date: "2023-05-20", + category: "Main Course", + tags: ["chicken", "oven-fried", "healthy"], + rating: 4.9, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb64a7228a479f6aeeaccaf7560e11ee.asciidoc b/docs/doc_examples/bb64a7228a479f6aeeaccaf7560e11ee.asciidoc new file mode 100644 index 000000000..ddbfb6569 --- /dev/null +++ b/docs/doc_examples/bb64a7228a479f6aeeaccaf7560e11ee.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "last-log-from-clientip", + source: { + index: ["kibana_sample_data_logs"], + }, + latest: { + unique_key: ["clientip"], + sort: "timestamp", + }, + frequency: "1m", + dest: { + index: "last-log-from-clientip", + }, + sync: { + time: { + field: "timestamp", + delay: "60s", + }, + }, + retention_policy: { + time: { + field: "timestamp", + max_age: "30d", + }, + }, + settings: { + max_page_search_size: 500, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb792e64a4c1f872296073b457aa03c8.asciidoc b/docs/doc_examples/bb792e64a4c1f872296073b457aa03c8.asciidoc new file mode 100644 index 000000000..0c98668fa --- /dev/null +++ b/docs/doc_examples/bb792e64a4c1f872296073b457aa03c8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.delete({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb975b342de7e838ebf6a36aaa1a8749.asciidoc b/docs/doc_examples/bb975b342de7e838ebf6a36aaa1a8749.asciidoc new file mode 100644 index 000000000..6d510961d --- /dev/null +++ b/docs/doc_examples/bb975b342de7e838ebf6a36aaa1a8749.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 3, + routing: 1, + refresh: "true", + document: { + text: "This is a vote", + my_join_field: { + name: "vote", + parent: "2", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc b/docs/doc_examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc new file mode 100644 index 000000000..f733254ab --- /dev/null +++ b/docs/doc_examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.count({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc b/docs/doc_examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc new file mode 100644 index 000000000..59eabf7be --- /dev/null +++ b/docs/doc_examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cooking_blog", + query: { + multi_match: { + query: "vegetarian curry", + fields: ["title", "description", "tags"], + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/bc1ad5cc6d3eab98e3ce01f209ba7094.asciidoc b/docs/doc_examples/bc1ad5cc6d3eab98e3ce01f209ba7094.asciidoc deleted file mode 100644 index d44288214..000000000 --- a/docs/doc_examples/bc1ad5cc6d3eab98e3ce01f209ba7094.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test', - alias: 'alias1', - routing: '1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/bc4d308069af23929a49d856f6bc3008.asciidoc b/docs/doc_examples/bc4d308069af23929a49d856f6bc3008.asciidoc new file mode 100644 index 000000000..0f34216b6 --- /dev/null +++ b/docs/doc_examples/bc4d308069af23929a49d856f6bc3008.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + rings: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + unit: "km", + distance_type: "plane", + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 300, + }, + { + from: 300, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc b/docs/doc_examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc new file mode 100644 index 000000000..6463ea56c --- /dev/null +++ b/docs/doc_examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + dcg: { + k: 20, + normalize: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc b/docs/doc_examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc new file mode 100644 index 000000000..e0bcddedd --- /dev/null +++ b/docs/doc_examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.fieldUsageStats({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcbd4d4749126837723438ff4faeb0f6.asciidoc b/docs/doc_examples/bcbd4d4749126837723438ff4faeb0f6.asciidoc new file mode 100644 index 000000000..d4b74e787 --- /dev/null +++ b/docs/doc_examples/bcbd4d4749126837723438ff4faeb0f6.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + filter_path: "aggregations", + size: 0, + aggs: { + top_values: { + terms: { + field: "my-field", + size: 10, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc b/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc new file mode 100644 index 000000000..22c9c01d9 --- /dev/null +++ 
b/docs/doc_examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "books", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc b/docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc new file mode 100644 index 000000000..e63a33d34 --- /dev/null +++ b/docs/doc_examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "_all", + expand_wildcards: "all", + filter_path: "*.settings.index.*.slowlog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc b/docs/doc_examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc new file mode 100644 index 000000000..5dc68e409 --- /dev/null +++ b/docs/doc_examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + doc: { + product_price: 100, + }, + upsert: { + product_price: 50, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc new file mode 100644 index 000000000..01a784b51 --- /dev/null +++ b/docs/doc_examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.simulate.ingest({ + docs: [ + { + _index: "my-index", + _id: "123", + _source: { + foo: "bar", + }, + }, + { + _index: "my-index", + _id: "456", + _source: { + foo: "rab", + }, + }, + ], + pipeline_substitutions: { + "my-pipeline": { + processors: [ + { + uppercase: { + field: "foo", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd0d30a7683037e1ebadd163514765d4.asciidoc b/docs/doc_examples/bd0d30a7683037e1ebadd163514765d4.asciidoc new file mode 100644 index 000000000..dfc485635 --- /dev/null +++ b/docs/doc_examples/bd0d30a7683037e1ebadd163514765d4.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "basic_users", + roles: ["user"], + rules: { + any: [ + { + field: { + groups: "cn=users,dc=example,dc=com", + }, + }, + { + field: { + dn: "cn=John Doe,cn=contractors,dc=example,dc=com", + }, + }, + ], + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd1e55b8cb2ca9e496e223e717d76640.asciidoc b/docs/doc_examples/bd1e55b8cb2ca9e496e223e717d76640.asciidoc new file mode 100644 index 000000000..a175b8350 --- /dev/null +++ b/docs/doc_examples/bd1e55b8cb2ca9e496e223e717d76640.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: 
{ + must: { + match_all: {}, + }, + filter: { + geo_polygon: { + "person.location": { + points: ["40, -70", "30, -80", "20, -90"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc b/docs/doc_examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc new file mode 100644 index 000000000..05746a7fc --- /dev/null +++ b/docs/doc_examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + scroll_size: 100, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd298b11933605c641626750c981d70b.asciidoc b/docs/doc_examples/bd298b11933605c641626750c981d70b.asciidoc new file mode 100644 index 000000000..c2ca58040 --- /dev/null +++ b/docs/doc_examples/bd298b11933605c641626750c981d70b.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "ct1", + template: { + settings: { + "index.number_of_shards": 2, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "ct2", + template: { + settings: { + "index.number_of_replicas": 0, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.simulateTemplate({ + index_patterns: ["my*"], + template: { + settings: { + "index.number_of_shards": 3, + }, + }, + composed_of: ["ct1", "ct2"], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/bd2a387e8c21bf01a1039e81d7602921.asciidoc b/docs/doc_examples/bd2a387e8c21bf01a1039e81d7602921.asciidoc new file mode 100644 index 000000000..f1f4feed3 --- /dev/null +++ b/docs/doc_examples/bd2a387e8c21bf01a1039e81d7602921.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-search-template", + script: { + lang: "mustache", + source: { + query: { + multi_match: { + query: "{{query_string}}", + fields: "[{{#text_fields}}{{user_name}},{{/text_fields}}]", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd3d710ec50a151453e141691163af72.asciidoc b/docs/doc_examples/bd3d710ec50a151453e141691163af72.asciidoc new file mode 100644 index 000000000..fad53bf5d --- /dev/null +++ b/docs/doc_examples/bd3d710ec50a151453e141691163af72.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + group_by: "parents", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd458073196a19ecdeb24a8016488c20.asciidoc b/docs/doc_examples/bd458073196a19ecdeb24a8016488c20.asciidoc new file mode 100644 index 000000000..e41d391d0 --- /dev/null +++ b/docs/doc_examples/bd458073196a19ecdeb24a8016488c20.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.deleteIndexTemplate({ + name: "my-index-template", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc b/docs/doc_examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc new file mode 100644 index 000000000..920798532 --- /dev/null +++ b/docs/doc_examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveIndex({ + name: "f*,remoteCluster1:bar*", + expand_wildcards: "all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd5918ab903c0889bb1f09c8c2466e43.asciidoc b/docs/doc_examples/bd5918ab903c0889bb1f09c8c2466e43.asciidoc deleted file mode 100644 index b8e035f6f..000000000 --- a/docs/doc_examples/bd5918ab903c0889bb1f09c8c2466e43.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'users', - body: { - mappings: { - properties: { - user_id: { - type: 'long' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc b/docs/doc_examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc new file mode 100644 index 000000000..fa54a3549 --- /dev/null +++ b/docs/doc_examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "shrink-index", + policy: { + phases: { + warm: { + min_age: "5d", + actions: { + shrink: { + number_of_shards: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd68666ca2e0be12f7624016317a62bc.asciidoc b/docs/doc_examples/bd68666ca2e0be12f7624016317a62bc.asciidoc new file mode 100644 index 000000000..821f169b6 --- /dev/null +++ b/docs/doc_examples/bd68666ca2e0be12f7624016317a62bc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + groups: "_all", +}); +console.log(response); + +const response1 = await client.nodes.stats({ + metric: "indices", + groups: "foo,bar", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc b/docs/doc_examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc new file mode 100644 index 000000000..d40a96760 --- /dev/null +++ b/docs/doc_examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearApiKeyCache({ + ids: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd7330af2609bdd8aa10958f5e640b93.asciidoc b/docs/doc_examples/bd7330af2609bdd8aa10958f5e640b93.asciidoc new file mode 100644 index 000000000..a063913c0 --- /dev/null +++ b/docs/doc_examples/bd7330af2609bdd8aa10958f5e640b93.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.index({ + index: "my_queries2", + id: 2, + refresh: "true", + document: { + query: { + match: { + "my_field.suffix": "xyz", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd767ea03171fe71c73f58f16d5da92f.asciidoc b/docs/doc_examples/bd767ea03171fe71c73f58f16d5da92f.asciidoc new file mode 100644 index 000000000..810d4a7cd --- /dev/null +++ b/docs/doc_examples/bd767ea03171fe71c73f58f16d5da92f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "file-path-test", + query: { + match: { + file_path: "/User/bob/photos/2017/05", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd7a1417fc27b5a801334ec44462b376.asciidoc b/docs/doc_examples/bd7a1417fc27b5a801334ec44462b376.asciidoc new file mode 100644 index 000000000..d1fd5b125 --- /dev/null +++ b/docs/doc_examples/bd7a1417fc27b5a801334ec44462b376.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.mlDatafeeds({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bd7fa2f122ab861cd00e0b9154d120b3.asciidoc b/docs/doc_examples/bd7fa2f122ab861cd00e0b9154d120b3.asciidoc new file mode 100644 index 000000000..47d5b8d52 --- /dev/null +++ b/docs/doc_examples/bd7fa2f122ab861cd00e0b9154d120b3.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + "@timestamp": { + format: "strict_date_optional_time||epoch_second", + type: "date", + }, + message: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc b/docs/doc_examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc new file mode 100644 index 000000000..f8485a4c3 --- /dev/null +++ b/docs/doc_examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + kwd: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + kwd: ["foo", "foo", "bar", "baz"], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc b/docs/doc_examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc new file mode 100644 index 000000000..961cd52e4 --- /dev/null +++ b/docs/doc_examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQueryRethrottle({ + task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", + requests_per_second: "-1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc 
b/docs/doc_examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc new file mode 100644 index 000000000..5d422bbb9 --- /dev/null +++ b/docs/doc_examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", + record_execution: true, +}); +console.log(response); + +const response1 = await client.watcher.getWatch({ + id: "my_watch", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc b/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc new file mode 100644 index 000000000..4e074487d --- /dev/null +++ b/docs/doc_examples/bdc55256fa5f701680631a149dbb75a9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + sales_by_category: { + terms: { + field: "category.keyword", + size: 5, + order: { + _count: "desc", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdc68012c121062628d6d73468bf4866.asciidoc b/docs/doc_examples/bdc68012c121062628d6d73468bf4866.asciidoc new file mode 100644 index 000000000..63445d9ea --- /dev/null +++ b/docs/doc_examples/bdc68012c121062628d6d73468bf4866.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.cleanupRepository({ + name: "my_repository", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc b/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc new file mode 100644 index 000000000..b518cae85 --- /dev/null +++ b/docs/doc_examples/bdd28276618235487ac96bd6679bc206.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + daily_sales: { + date_histogram: { + field: "order_date", + calendar_interval: "day", + }, + aggs: { + revenue: { + sum: { + field: "taxful_total_price", + }, + }, + cumulative_revenue: { + cumulative_sum: { + buckets_path: "revenue", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc b/docs/doc_examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc new file mode 100644 index 000000000..4da9dfa63 --- /dev/null +++ b/docs/doc_examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.get({ + name: "my-app", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bdfb86cdfffb9d2ee6e3d399f00a57b0.asciidoc b/docs/doc_examples/bdfb86cdfffb9d2ee6e3d399f00a57b0.asciidoc new file mode 100644 index 000000000..fd79781de --- /dev/null +++ b/docs/doc_examples/bdfb86cdfffb9d2ee6e3d399f00a57b0.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test*", + filter_path: "aggregations", + aggs: { + ip: { + terms: { + field: "ip", + }, + aggs: { + tm: { + top_metrics: { + metrics: { + field: "m", + }, + sort: { + s: "desc", + }, + size: 1, + }, + }, + having_tm: { + bucket_selector: { + buckets_path: { + top_m: "tm[m]", + }, + script: "params.top_m < 1000", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be1bd47393646ac6bbee177d1cdb7738.asciidoc b/docs/doc_examples/be1bd47393646ac6bbee177d1cdb7738.asciidoc deleted file mode 100644 index e6082026e..000000000 --- a/docs/doc_examples/be1bd47393646ac6bbee177d1cdb7738.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'title', - 'content' - ], - query: 'this that thus', - minimum_should_match: 2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/be285eef1d2df0dfcf876e2d4b361f1e.asciidoc b/docs/doc_examples/be285eef1d2df0dfcf876e2d4b361f1e.asciidoc new file mode 100644 index 000000000..78b727a26 --- /dev/null +++ b/docs/doc_examples/be285eef1d2df0dfcf876e2d4b361f1e.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "common_grams_example", + settings: { + analysis: { + analyzer: { + index_grams: { + tokenizer: "whitespace", + filter: ["common_grams_query"], + }, + }, + filter: { + common_grams_query: { + type: "common_grams", + common_words: ["a", "is", "the"], + ignore_case: true, + query_mode: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc b/docs/doc_examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc index 9bfabc15d..61e1776a5 100644 --- a/docs/doc_examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc +++ b/docs/doc_examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc @@ -4,8 +4,7 @@ [source, js] ---- const response = await client.tasks.get({ - task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619' -}) -console.log(response) + task_id: "r1A2WoRbTwKZ516z6NEs5A:36619", +}); +console.log(response); ---- - diff --git a/docs/doc_examples/be49260e1b3496c4feac38c56ebb0669.asciidoc b/docs/doc_examples/be49260e1b3496c4feac38c56ebb0669.asciidoc deleted file mode 100644 index 613044417..000000000 --- a/docs/doc_examples/be49260e1b3496c4feac38c56ebb0669.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'twitter', - q: 'user:kimchy' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc b/docs/doc_examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc new file mode 100644 index 000000000..27b13645c --- /dev/null +++ b/docs/doc_examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + refresh: "true", + slices: 5, + 
script: { + source: "ctx._source['extra'] = 'test'", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be5c5a9c25901737585e4fff9195da3c.asciidoc b/docs/doc_examples/be5c5a9c25901737585e4fff9195da3c.asciidoc new file mode 100644 index 000000000..7cbd2aa06 --- /dev/null +++ b/docs/doc_examples/be5c5a9c25901737585e4fff9195da3c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-bit-vectors", + filter_path: "hits.hits", + query: { + knn: { + query_vector: [127, -127, 0, 1, 42], + field: "my_vector", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be5d62e7c8f63687c585305fbe70d7d0.asciidoc b/docs/doc_examples/be5d62e7c8f63687c585305fbe70d7d0.asciidoc new file mode 100644 index 000000000..818de22d5 --- /dev/null +++ b/docs/doc_examples/be5d62e7c8f63687c585305fbe70d7d0.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + tdigest: { + compression: 200, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc b/docs/doc_examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc new file mode 100644 index 000000000..a988440a3 --- /dev/null +++ b/docs/doc_examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + scripted_upsert: true, + script: { + source: + "\n if ( ctx.op == 'create' ) {\n ctx._source.counter = params.count\n } else {\n ctx._source.counter += params.count\n }\n ", + params: { + count: 4, + }, + }, + upsert: {}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc b/docs/doc_examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc new file mode 100644 index 000000000..42e3fb761 --- /dev/null +++ b/docs/doc_examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putTrainedModelDefinitionPart({ + model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english", + part: 0, + definition: "...", + total_definition_length: 265632637, + total_parts: 64, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be9376b1e354ad9c6bdad83f6a0ce5ad.asciidoc b/docs/doc_examples/be9376b1e354ad9c6bdad83f6a0ce5ad.asciidoc new file mode 100644 index 000000000..b76dc5c90 --- /dev/null +++ b/docs/doc_examples/be9376b1e354ad9c6bdad83f6a0ce5ad.asciidoc @@ -0,0 +1,61 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.previewTransform({ + source: { + index: "kibana_sample_data_flights", + query: { + bool: { + filter: [ + { + term: { + Cancelled: false, + }, + }, + ], + }, + }, + }, + dest: { + index: "sample_flight_delays_by_carrier", + }, + pivot: { + group_by: { + 
carrier: { + terms: { + field: "Carrier", + }, + }, + }, + aggregations: { + flights_count: { + value_count: { + field: "FlightNum", + }, + }, + delay_mins_total: { + sum: { + field: "FlightDelayMin", + }, + }, + flight_mins_total: { + sum: { + field: "FlightTimeMin", + }, + }, + delay_time_percentage: { + bucket_script: { + buckets_path: { + delay_time: "delay_mins_total.value", + flight_time: "flight_mins_total.value", + }, + script: "(params.delay_time / params.flight_time) * 100", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/be9836fe55c5fada404a2adc1663d832.asciidoc b/docs/doc_examples/be9836fe55c5fada404a2adc1663d832.asciidoc new file mode 100644 index 000000000..e8b24cc01 --- /dev/null +++ b/docs/doc_examples/be9836fe55c5fada404a2adc1663d832.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + http: { + type: "composite", + script: 'emit(grok("%{COMMONAPACHELOG}").extract(doc["message"].value))', + fields: { + clientip: { + type: "ip", + }, + verb: { + type: "keyword", + }, + response: { + type: "long", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc b/docs/doc_examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc new file mode 100644 index 000000000..5a0da457f --- /dev/null +++ b/docs/doc_examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "snapshot_*", + sort: "start_time", + from_sort_value: 1577833200000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc b/docs/doc_examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc new file mode 100644 index 000000000..ae9b3f6f0 --- /dev/null +++ b/docs/doc_examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user_identifier: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/beba2a9795c8a13653e1edf64eec4357.asciidoc b/docs/doc_examples/beba2a9795c8a13653e1edf64eec4357.asciidoc new file mode 100644 index 000000000..dcf66fbd5 --- /dev/null +++ b/docs/doc_examples/beba2a9795c8a13653e1edf64eec4357.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "test", + settings: { + "index.routing.allocation.require.size": "big", + "index.routing.allocation.require.rack": "rack1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bed14cc152522ca0726ac3746ebc31db.asciidoc b/docs/doc_examples/bed14cc152522ca0726ac3746ebc31db.asciidoc new file mode 100644 index 000000000..6b1fa92ed --- /dev/null +++ b/docs/doc_examples/bed14cc152522ca0726ac3746ebc31db.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// 
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my_index", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + my_counter: 0, + }, + { + index: { + _id: 2, + }, + }, + { + my_counter: 9223372036854776000, + }, + { + index: { + _id: 3, + }, + }, + { + my_counter: 18446744073709552000, + }, + { + index: { + _id: 4, + }, + }, + { + my_counter: 18446744073709552000, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc b/docs/doc_examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc new file mode 100644 index 000000000..2e7e049df --- /dev/null +++ b/docs/doc_examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + size: 10, + knn: { + query_vector: [0.04283529, 0.85670587, -0.51402352, 0], + field: "my_int4_vector", + k: 20, + num_candidates: 50, + }, + rescore: { + window_size: 20, + query: { + rescore_query: { + script_score: { + query: { + match_all: {}, + }, + script: { + source: "(dotProduct(params.queryVector, 'my_int4_vector') + 1.0)", + params: { + queryVector: [0.04283529, 0.85670587, -0.51402352, 0], + }, + }, + }, + }, + query_weight: 0, + rescore_query_weight: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf17440ac178d2ef5f5be643d033920b.asciidoc b/docs/doc_examples/bf17440ac178d2ef5f5be643d033920b.asciidoc new file mode 100644 index 000000000..240f711b1 --- /dev/null +++ b/docs/doc_examples/bf17440ac178d2ef5f5be643d033920b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "my-index", + pipeline: "elser-v2-test", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc b/docs/doc_examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc new file mode 100644 index 000000000..5af1d209c --- /dev/null +++ b/docs/doc_examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "remote_user", + password: "<PASSWORD>", + roles: ["remote1"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf2e6ea2bae621b9b2fee7003e891f86.asciidoc b/docs/doc_examples/bf2e6ea2bae621b9b2fee7003e891f86.asciidoc new file mode 100644 index 000000000..bfc119d2c --- /dev/null +++ b/docs/doc_examples/bf2e6ea2bae621b9b2fee7003e891f86.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + simple_query_string: { + fields: ["body"], + query: "ski", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc b/docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc new file mode 100644 index 000000000..b0b47f121 --- /dev/null
+++ b/docs/doc_examples/bf3c3bc41c593a80faebef1df353e483.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "rerank", + inference_id: "jinaai-rerank", + inference_config: { + service: "jinaai", + service_settings: { + api_key: "<api_key>", + model_id: "jina-reranker-v2-base-multilingual", + }, + task_settings: { + top_n: 10, + return_documents: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf3f520b47581d861e802730aaf2a519.asciidoc b/docs/doc_examples/bf3f520b47581d861e802730aaf2a519.asciidoc new file mode 100644 index 000000000..a2f8a7bfd --- /dev/null +++ b/docs/doc_examples/bf3f520b47581d861e802730aaf2a519.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: "logs-nginx.access-prod", + alias: "logs", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf448c3889c18266e2e6d3af4f614da2.asciidoc b/docs/doc_examples/bf448c3889c18266e2e6d3af4f614da2.asciidoc new file mode 100644 index 000000000..98dc289b4 --- /dev/null +++ b/docs/doc_examples/bf448c3889c18266e2e6d3af4f614da2.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: ".ds-my-data-stream-2099-03-08-000003", + id: "bfspvnIBr7VVZlfp2lqX", + if_seq_no: 0, + if_primary_term: 1, + document: { + "@timestamp": "2099-03-08T11:06:07.000Z", + user: { + id: "8a4f500d", + }, + message: "Login successful", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf639275d0818be04317ee5ab6075da6.asciidoc b/docs/doc_examples/bf639275d0818be04317ee5ab6075da6.asciidoc new file mode 100644 index 000000000..b6894f8b7 --- /dev/null +++ b/docs/doc_examples/bf639275d0818be04317ee5ab6075da6.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + has_parent: { + parent_type: "parent", + query: { + term: { + tag: { + value: "Elasticsearch", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bf8680d940c84e43a9483a25548dea57.asciidoc b/docs/doc_examples/bf8680d940c84e43a9483a25548dea57.asciidoc new file mode 100644 index 000000000..312348bb9 --- /dev/null +++ b/docs/doc_examples/bf8680d940c84e43a9483a25548dea57.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + filter: { + autocomplete_filter: { + type: "edge_ngram", + min_gram: 1, + max_gram: 20, + }, + }, + analyzer: { + autocomplete: { + type: "custom", + tokenizer: "standard", + filter: ["lowercase", "autocomplete_filter"], + }, + }, + }, + }, + mappings: { + properties: { + text: { + type: "text", + analyzer: "autocomplete", + search_analyzer: "standard", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({
+ index: "my-index-000001", + id: 1, + document: { + text: "Quick Brown Fox", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match: { + text: { + query: "Quick Br", + operator: "and", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/bf9f13dc6c24cc225a72e32177e9ee02.asciidoc b/docs/doc_examples/bf9f13dc6c24cc225a72e32177e9ee02.asciidoc new file mode 100644 index 000000000..822b94914 --- /dev/null +++ b/docs/doc_examples/bf9f13dc6c24cc225a72e32177e9ee02.asciidoc @@ -0,0 +1,73 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_locations", + mappings: { + properties: { + pin: { + properties: { + location: { + type: "geo_point", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my_locations", + id: 1, + document: { + pin: { + location: { + lat: 40.12, + lon: -71.34, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.create({ + index: "my_geoshapes", + mappings: { + properties: { + pin: { + properties: { + location: { + type: "geo_shape", + }, + }, + }, + }, + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my_geoshapes", + id: 1, + document: { + pin: { + location: { + type: "polygon", + coordinates: [ + [ + [13, 51.5], + [15, 51.5], + [15, 54], + [13, 54], + [13, 51.5], + ], + ], + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/bfb0db2a72f22c9c2046119777efbb43.asciidoc b/docs/doc_examples/bfb0db2a72f22c9c2046119777efbb43.asciidoc new file mode 100644 index 000000000..6936bb686 --- /dev/null +++ b/docs/doc_examples/bfb0db2a72f22c9c2046119777efbb43.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "elser-embeddings", + query: { + sparse_vector: { + field: "content_embedding", + inference_id: "elser_embeddings", + query: "How to avoid muscle soreness after running?", + }, + }, + _source: ["id", "content"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/bfb1aa83da8e3f414d50b5ed7894ed33.asciidoc b/docs/doc_examples/bfb1aa83da8e3f414d50b5ed7894ed33.asciidoc new file mode 100644 index 000000000..1d21a7fb7 --- /dev/null +++ b/docs/doc_examples/bfb1aa83da8e3f414d50b5ed7894ed33.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + script_fields: { + my_doubled_field: { + script: { + source: "field('my_field').get(null) * params['multiplier']", + params: { + multiplier: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc b/docs/doc_examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc new file mode 100644 index 000000000..2a993db0b --- /dev/null +++ b/docs/doc_examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", 
+ snapshot: "snapshot_2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/bfcd65ab85d684d36a8550080032958d.asciidoc b/docs/doc_examples/bfcd65ab85d684d36a8550080032958d.asciidoc deleted file mode 100644 index 618f5153b..000000000 --- a/docs/doc_examples/bfcd65ab85d684d36a8550080032958d.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - q: 'message:number', - size: '0', - terminate_after: '1' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/bfd6fa3f44e6165f8999102f5a8e24d6.asciidoc b/docs/doc_examples/bfd6fa3f44e6165f8999102f5a8e24d6.asciidoc new file mode 100644 index 000000000..77ceb7753 --- /dev/null +++ b/docs/doc_examples/bfd6fa3f44e6165f8999102f5a8e24d6.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index1", + query: { + query_string: { + query: "running with scissors", + fields: ["comment", "comment.english"], + }, + }, + highlight: { + order: "score", + fields: { + comment: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc b/docs/doc_examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc index f57620210..518894b96 100644 --- a/docs/doc_examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc +++ b/docs/doc_examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc @@ -4,43 +4,42 @@ [source, js] ---- const response = await client.bulk({ - filter_path: 'items.*.error', - body: [ + filter_path: "items.*.error", + operations: [ { update: { - _id: '5', - _index: 'index1' - } + _id: "5", + _index: "index1", + }, }, { doc: { - my_field: 'baz' - } + my_field: "baz", + }, }, { update: { - _id: '6', - _index: 'index1' - } + _id: "6", + _index: "index1", + }, }, { doc: { - my_field: 'baz' - } + my_field: "baz", + }, }, { update: { - _id: '7', - _index: 'index1' - } + _id: "7", + _index: "index1", + }, }, { doc: { - my_field: 'baz' - } - } - ] -}) -console.log(response) + my_field: "baz", + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc b/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc new file mode 100644 index 000000000..f2b49ad8b --- /dev/null +++ b/docs/doc_examples/c00c9412609832ebceb9e786dd9542df.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateName({ + connector_id: "my-connector", + name: "Custom connector", + description: "This is my customized connector", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c012f42b26eb8dd9b197644c3ed954cf.asciidoc b/docs/doc_examples/c012f42b26eb8dd9b197644c3ed954cf.asciidoc new file mode 100644 index 000000000..74becb406 --- /dev/null +++ b/docs/doc_examples/c012f42b26eb8dd9b197644c3ed954cf.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 2, + document: { + name: { + first: "Paul", + last: "McCartney", + title: { + value: "Sir", + category: "order of chivalry", + 
}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c03ce952de42eae4b522cedc9fd3d14a.asciidoc b/docs/doc_examples/c03ce952de42eae4b522cedc9fd3d14a.asciidoc new file mode 100644 index 000000000..7ddfea214 --- /dev/null +++ b/docs/doc_examples/c03ce952de42eae4b522cedc9fd3d14a.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c065a200c00e2005d88ec2f0c10c908a.asciidoc b/docs/doc_examples/c065a200c00e2005d88ec2f0c10c908a.asciidoc new file mode 100644 index 000000000..37c356419 --- /dev/null +++ b/docs/doc_examples/c065a200c00e2005d88ec2f0c10c908a.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["shingle"], + text: "quick brown fox jumps", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c067182d385f59ce5952fb9a716fbf05.asciidoc b/docs/doc_examples/c067182d385f59ce5952fb9a716fbf05.asciidoc new file mode 100644 index 000000000..45c34f74e --- /dev/null +++ b/docs/doc_examples/c067182d385f59ce5952fb9a716fbf05.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.postCalendarEvents({ + calendar_id: "planned-outages", + events: [ + { + description: "event 1", + start_time: 1513641600000, + end_time: 1513728000000, + }, + { + description: "event 2", + start_time: 1513814400000, + end_time: 1513900800000, + }, + { + description: "event 3", + start_time: 1514160000000, + end_time: 1514246400000, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c088ce5291ae28650b6091cdec489398.asciidoc b/docs/doc_examples/c088ce5291ae28650b6091cdec489398.asciidoc new file mode 100644 index 000000000..c555add24 --- /dev/null +++ b/docs/doc_examples/c088ce5291ae28650b6091cdec489398.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 100, + query: { + match: { + title: "elasticsearch", + }, + }, + pit: { + id: "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", + keep_alive: "1m", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c0a4b0c1c6eff14da8b152ceb19c1c31.asciidoc b/docs/doc_examples/c0a4b0c1c6eff14da8b152ceb19c1c31.asciidoc new file mode 100644 index 000000000..49f017323 --- /dev/null +++ b/docs/doc_examples/c0a4b0c1c6eff14da8b152ceb19c1c31.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.health(); +console.log(response); + +const response1 = await client.cat.nodes(); +console.log(response1); +---- diff --git 
a/docs/doc_examples/c0c638e3d218b0ecbe5c4d77c964ae9e.asciidoc b/docs/doc_examples/c0c638e3d218b0ecbe5c4d77c964ae9e.asciidoc new file mode 100644 index 000000000..6ade03c9c --- /dev/null +++ b/docs/doc_examples/c0c638e3d218b0ecbe5c4d77c964ae9e.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + term: { + "user.id": { + value: "kimchy", + boost: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc b/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc new file mode 100644 index 000000000..c0190ee1c --- /dev/null +++ b/docs/doc_examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateConfiguration({ + connector_id: "my-spo-connector", + values: { + secret_value: "foo-bar", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c0ebaa33e750b87555dc352073f692e8.asciidoc b/docs/doc_examples/c0ebaa33e750b87555dc352073f692e8.asciidoc new file mode 100644 index 000000000..2adf4c42e --- /dev/null +++ b/docs/doc_examples/c0ebaa33e750b87555dc352073f692e8.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.close({ + index: "my-index-000001", +}); +console.log(response); + +const response1 = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + content: { + type: "custom", + tokenizer: "whitespace", + }, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.open({ + index: "my-index-000001", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc b/docs/doc_examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc new file mode 100644 index 000000000..8227fad3c --- /dev/null +++ b/docs/doc_examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getSource({ + index: "my-index-000001", + id: 1, + _source_includes: "*.id", + _source_excludes: "entities", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c10a486a28cbc5b2f15c3474ae31a431.asciidoc b/docs/doc_examples/c10a486a28cbc5b2f15c3474ae31a431.asciidoc new file mode 100644 index 000000000..d2009383f --- /dev/null +++ b/docs/doc_examples/c10a486a28cbc5b2f15c3474ae31a431.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "nightly-snapshots", + schedule: "0 30 1 * * ?", + name: "<nightly-snap-{now/d}>", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + }, + retention: { + expire_after: "30d", + min_count: 5, + max_count: 50, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c11c4d6b30e882871bf0074f407149bd.asciidoc b/docs/doc_examples/c11c4d6b30e882871bf0074f407149bd.asciidoc new file mode
100644 index 000000000..7067915ee --- /dev/null +++ b/docs/doc_examples/c11c4d6b30e882871bf0074f407149bd.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + text: "This is a parent document.", + "my-join-field": "my-parent", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c12d6e962f083c728f9397932f05202e.asciidoc b/docs/doc_examples/c12d6e962f083c728f9397932f05202e.asciidoc new file mode 100644 index 000000000..8dc0ab1c1 --- /dev/null +++ b/docs/doc_examples/c12d6e962f083c728f9397932f05202e.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", + querystring: { + connector_id: "connector-1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1409f591a01589638d9b00436ce42c0.asciidoc b/docs/doc_examples/c1409f591a01589638d9b00436ce42c0.asciidoc new file mode 100644 index 000000000..e33cc36cb --- /dev/null +++ b/docs/doc_examples/c1409f591a01589638d9b00436ce42c0.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRealms({ + realms: "default_file", + usernames: "rdeniro,alpacino", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc b/docs/doc_examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc new file mode 100644 index 000000000..f823fda2b --- /dev/null +++ b/docs/doc_examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + fields: { + comment: { + type: "plain", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c155d2670ff82b135c7dcec0fc8a3f23.asciidoc b/docs/doc_examples/c155d2670ff82b135c7dcec0fc8a3f23.asciidoc new file mode 100644 index 000000000..b214d0ee9 --- /dev/null +++ b/docs/doc_examples/c155d2670ff82b135c7dcec0fc8a3f23.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.delete({ + id: "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc b/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc new file mode 100644 index 000000000..54f13ca9d --- /dev/null +++ b/docs/doc_examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.list({ + index_name: "search-google-drive", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c186ecf6f799ddff7add1abdecea5821.asciidoc 
b/docs/doc_examples/c186ecf6f799ddff7add1abdecea5821.asciidoc new file mode 100644 index 000000000..9d4fb8338 --- /dev/null +++ b/docs/doc_examples/c186ecf6f799ddff7add1abdecea5821.asciidoc @@ -0,0 +1,47 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + full_name: { + type: "text", + store: true, + }, + title: { + type: "text", + store: true, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + full_name: "Alice Ball", + title: "Professor", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + script_fields: { + name_with_title: { + script: { + lang: "painless", + source: + "params._fields['title'].value + ' ' + params._fields['full_name'].value", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/c187b52646cedeebe0716327add65642.asciidoc b/docs/doc_examples/c187b52646cedeebe0716327add65642.asciidoc new file mode 100644 index 000000000..f599f4825 --- /dev/null +++ b/docs/doc_examples/c187b52646cedeebe0716327add65642.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.getAsync({ + id: "FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", + format: "json", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1a39c2628ada04c3ddd61a303b65d44.asciidoc b/docs/doc_examples/c1a39c2628ada04c3ddd61a303b65d44.asciidoc new file mode 100644 index 000000000..b4564c6f3 --- /dev/null +++ b/docs/doc_examples/c1a39c2628ada04c3ddd61a303b65d44.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: + "(24 - hamming(params.queryVector, 'my_byte_dense_vector')) / 24", + params: { + queryVector: [4, 3, 0], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1a895497066a3dac674d4b1a119048d.asciidoc b/docs/doc_examples/c1a895497066a3dac674d4b1a119048d.asciidoc new file mode 100644 index 000000000..08d37316d --- /dev/null +++ b/docs/doc_examples/c1a895497066a3dac674d4b1a119048d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + pretty: "true", + query: { + term: { + full_text: "Quick Brown Foxes!", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc b/docs/doc_examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc new file mode 100644 index 000000000..dc95c58bc --- /dev/null +++ b/docs/doc_examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.transform.previewTransform({ + source: { + index: "kibana_sample_data_ecommerce", + }, + pivot: { + group_by: { + customer_id: { + terms: { + field: "customer_id", + missing_bucket: true, + }, + }, + }, + aggregations: { + max_price: { + max: { + field: "taxful_total_price", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc b/docs/doc_examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc new file mode 100644 index 000000000..7f6259118 --- /dev/null +++ b/docs/doc_examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + expected_reciprocal_rank: { + maximum_relevance: 3, + k: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c1efc5cfcb3c29711bfe118f1baa28b0.asciidoc b/docs/doc_examples/c1efc5cfcb3c29711bfe118f1baa28b0.asciidoc new file mode 100644 index 000000000..7d4d534cd --- /dev/null +++ b/docs/doc_examples/c1efc5cfcb3c29711bfe118f1baa28b0.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "keyword_example", + settings: { + analysis: { + analyzer: { + rebuilt_keyword: { + tokenizer: "keyword", + filter: [], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc b/docs/doc_examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc new file mode 100644 index 000000000..e9e3a968d --- /dev/null +++ b/docs/doc_examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.flush({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c208de54369379e8d78ab201be18b6be.asciidoc b/docs/doc_examples/c208de54369379e8d78ab201be18b6be.asciidoc new file mode 100644 index 000000000..79835e938 --- /dev/null +++ b/docs/doc_examples/c208de54369379e8d78ab201be18b6be.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + longs_as_strings: { + match_mapping_type: "string", + match: "long_*", + unmatch: "*_text", + mapping: { + type: "long", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + long_num: "5", + long_text: "foo", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc b/docs/doc_examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc new file mode 100644 index 000000000..cdde2a100 --- /dev/null +++ b/docs/doc_examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_query_rules/my-ruleset/_rule/my-rule1", + body: { + type: "pinned", + criteria: [ + { + type: "contains", + metadata: "user_query", + values: ["pugs", "puggles"], + }, + { + type: "exact", + metadata: "user_country", + values: ["us"], + }, + ], + actions: { + ids: ["id1", "id2"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc b/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc new file mode 100644 index 000000000..c33aa6f65 --- /dev/null +++ b/docs/doc_examples/c21eb4bc30087188241cbba6b6b89999.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateServiceType({ + connector_id: "my-connector", + service_type: "sharepoint_online", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c22b72c4a52ee098331b3f252c22860d.asciidoc b/docs/doc_examples/c22b72c4a52ee098331b3f252c22860d.asciidoc deleted file mode 100644 index b4af4528c..000000000 --- a/docs/doc_examples/c22b72c4a52ee098331b3f252c22860d.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.deleteByQuery({ - index: 'twitter,blog', - body: { - query: { - match_all: {} - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/c23e32775340d7bc6f46820313014d8a.asciidoc b/docs/doc_examples/c23e32775340d7bc6f46820313014d8a.asciidoc new file mode 100644 index 000000000..4b72af338 --- /dev/null +++ b/docs/doc_examples/c23e32775340d7bc6f46820313014d8a.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my_test_scores_2", + pipeline: "my_test_scores_pipeline", + document: { + student: "kimchy", + grad_year: "2099", + math_score: 1200, + verbal_score: 800, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c267e90b7873a7c8c8af06f01e958e69.asciidoc b/docs/doc_examples/c267e90b7873a7c8c8af06f01e958e69.asciidoc new file mode 100644 index 000000000..ede289d19 --- /dev/null +++ b/docs/doc_examples/c267e90b7873a7c8c8af06f01e958e69.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "logs*", + size: 0, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c26b185952ddf9842e18493aca2de147.asciidoc b/docs/doc_examples/c26b185952ddf9842e18493aca2de147.asciidoc new file mode 100644 index 000000000..b37a39ab7 --- /dev/null +++ b/docs/doc_examples/c26b185952ddf9842e18493aca2de147.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "books", + document: { + name: "Snow Crash", + author: "Neal Stephenson", + release_date: "1992-06-01", + page_count: 470, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc 
b/docs/doc_examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc new file mode 100644 index 000000000..1b989add3 --- /dev/null +++ b/docs/doc_examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.scroll({ + scroll_id: "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c28f0b0dd3246cb91d6facb3295a61d7.asciidoc b/docs/doc_examples/c28f0b0dd3246cb91d6facb3295a61d7.asciidoc new file mode 100644 index 000000000..d47342e95 --- /dev/null +++ b/docs/doc_examples/c28f0b0dd3246cb91d6facb3295a61d7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.close({ + index: "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc b/docs/doc_examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc new file mode 100644 index 000000000..125fe122d --- /dev/null +++ b/docs/doc_examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.clearScroll({ + scroll_id: "_all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c2d7c36daac8608d2515c549b2c82436.asciidoc b/docs/doc_examples/c2d7c36daac8608d2515c549b2c82436.asciidoc new file mode 100644 index 000000000..b8646e655 --- /dev/null +++ b/docs/doc_examples/c2d7c36daac8608d2515c549b2c82436.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + tile: { + geotile_grid: { + field: "location", + precision: 22, + bounds: { + top_left: "POINT (4.9 52.4)", + bottom_right: "POINT (5.0 52.3)", + }, + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c318fde926842722825a51e5c9c326a9.asciidoc b/docs/doc_examples/c318fde926842722825a51e5c9c326a9.asciidoc new file mode 100644 index 000000000..20508d4c1 --- /dev/null +++ b/docs/doc_examples/c318fde926842722825a51e5c9c326a9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + text: " fox ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c32a3f8071d87f0a3f5a78e07fe7a669.asciidoc b/docs/doc_examples/c32a3f8071d87f0a3f5a78e07fe7a669.asciidoc deleted file mode 100644 index 219f4e552..000000000 --- a/docs/doc_examples/c32a3f8071d87f0a3f5a78e07fe7a669.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.deleteByQuery({ - index: 'twitter', - routing: '1', - body: { - query: { - range: { - age: { - gte: 10 - } - } - } - } -}) -console.log(response) ----- - diff --git 
a/docs/doc_examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc b/docs/doc_examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc new file mode 100644 index 000000000..7c02bc9da --- /dev/null +++ b/docs/doc_examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match_phrase_prefix: { + my_field: "brown f", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc b/docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc new file mode 100644 index 000000000..f80f1ac99 --- /dev/null +++ b/docs/doc_examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: + '\nFROM library\n| WHERE match(author, "Frank Herbert", {"minimum_should_match": 2, "operator": "AND"})\n| LIMIT 5\n', +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4272ad0309ffbcbe9ce96bf9fb4352a.asciidoc b/docs/doc_examples/c4272ad0309ffbcbe9ce96bf9fb4352a.asciidoc new file mode 100644 index 000000000..368f20c7d --- /dev/null +++ b/docs/doc_examples/c4272ad0309ffbcbe9ce96bf9fb4352a.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "place", + pretty: "true", + suggest: { + place_suggestion: { + prefix: "tim", + completion: { + field: "suggest", + size: 10, + contexts: { + place_type: ["cafe", "restaurants"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c42bc6e74afc3d43cd032ec2bfd77385.asciidoc b/docs/doc_examples/c42bc6e74afc3d43cd032ec2bfd77385.asciidoc new file mode 100644 index 000000000..78ec45236 --- /dev/null +++ b/docs/doc_examples/c42bc6e74afc3d43cd032ec2bfd77385.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["word_delimiter"], + text: "Neil's-Super-Duper-XL500--42+AutoCoder", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc b/docs/doc_examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc new file mode 100644 index 000000000..f5e4eeae0 --- /dev/null +++ b/docs/doc_examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + locale: "fr-FR", + query: + '\n ROW birth_date_string = "2023-01-15T00:00:00.000Z"\n | EVAL birth_date = date_parse(birth_date_string)\n | EVAL month_of_birth = DATE_FORMAT("MMMM",birth_date)\n | LIMIT 5\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/c464ed2001d66a1446f37659dc9efc2a.asciidoc b/docs/doc_examples/c464ed2001d66a1446f37659dc9efc2a.asciidoc new file mode 100644 index 000000000..de2646cd4 --- /dev/null +++ b/docs/doc_examples/c464ed2001d66a1446f37659dc9efc2a.asciidoc @@ -0,0 +1,27 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + range: { + date_range: { + field: "date", + format: "MM-yyyy", + ranges: [ + { + to: "now-10M/M", + }, + { + from: "now-10M/M", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc b/docs/doc_examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc new file mode 100644 index 000000000..a698efa88 --- /dev/null +++ b/docs/doc_examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.plugins({ + v: "true", + s: "component", + h: "name,component,version,description", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c48264ec5d9b9679fddd72e5c44425b9.asciidoc b/docs/doc_examples/c48264ec5d9b9679fddd72e5c44425b9.asciidoc deleted file mode 100644 index 646050e2b..000000000 --- a/docs/doc_examples/c48264ec5d9b9679fddd72e5c44425b9.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.cluster.health({ - index: 'twitter', - level: 'shards' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/c48b8bcd6f41e0d12b58e854e09ea893.asciidoc b/docs/doc_examples/c48b8bcd6f41e0d12b58e854e09ea893.asciidoc new file mode 100644 index 000000000..9f1c29078 --- /dev/null +++ b/docs/doc_examples/c48b8bcd6f41e0d12b58e854e09ea893.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "MULTIPOLYGON (((1002.0 200.0, 1003.0 200.0, 1003.0 300.0, 1002.0 300.0, 1002.0 200.0)), ((1000.0 100.0, 1001.0 100.0, 1001.0 200.0, 1000.0 200.0, 1000.0 100.0), (1000.2 100.2, 1000.8 100.2, 1000.8 100.8, 1000.2 100.8, 1000.2 100.2)))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc b/docs/doc_examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc new file mode 100644 index 000000000..41ee348ba --- /dev/null +++ b/docs/doc_examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache(); +console.log(response); +---- diff --git a/docs/doc_examples/c4b727723b57052b6504bb74fe09abc6.asciidoc b/docs/doc_examples/c4b727723b57052b6504bb74fe09abc6.asciidoc new file mode 100644 index 000000000..19ee3e92b --- /dev/null +++ b/docs/doc_examples/c4b727723b57052b6504bb74fe09abc6.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["template*"], + priority: 1, + template: { + settings: { + number_of_shards: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4c1a87414741a678f6cb91804daf095.asciidoc 
b/docs/doc_examples/c4c1a87414741a678f6cb91804daf095.asciidoc new file mode 100644 index 000000000..693a16ba0 --- /dev/null +++ b/docs/doc_examples/c4c1a87414741a678f6cb91804daf095.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + linear: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c4fadbb7f61e5f83ab3fc9cd4b82b5e5.asciidoc b/docs/doc_examples/c4fadbb7f61e5f83ab3fc9cd4b82b5e5.asciidoc new file mode 100644 index 000000000..745d088b2 --- /dev/null +++ b/docs/doc_examples/c4fadbb7f61e5f83ab3fc9cd4b82b5e5.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", + feature_states: ["geoip"], + include_global_state: false, + indices: "-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc b/docs/doc_examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc new file mode 100644 index 000000000..198997e4a --- /dev/null +++ b/docs/doc_examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c54597143ac86540726f6422fd98b22e.asciidoc b/docs/doc_examples/c54597143ac86540726f6422fd98b22e.asciidoc new file mode 100644 index 000000000..8de8f5661 --- /dev/null +++ b/docs/doc_examples/c54597143ac86540726f6422fd98b22e.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_security/settings", + body: { + security: { + "index.auto_expand_replicas": "0-all", + }, + "security-tokens": { + "index.auto_expand_replicas": "0-all", + }, + "security-profile": { + "index.auto_expand_replicas": "0-all", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c554a1791f29bbbcddda84c64deaba6f.asciidoc b/docs/doc_examples/c554a1791f29bbbcddda84c64deaba6f.asciidoc new file mode 100644 index 000000000..93d08c3c2 --- /dev/null +++ b/docs/doc_examples/c554a1791f29bbbcddda84c64deaba6f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c580092fd3d36c32b09d63921708a67b.asciidoc b/docs/doc_examples/c580092fd3d36c32b09d63921708a67b.asciidoc new file mode 100644 index 000000000..91be3581e --- /dev/null +++ b/docs/doc_examples/c580092fd3d36c32b09d63921708a67b.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] 
+---- +const response = await client.search({ + query: { + dis_max: { + queries: [ + { + term: { + title: "Quick pets", + }, + }, + { + term: { + body: "Quick pets", + }, + }, + ], + tie_breaker: 0.7, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c5802e9f3f4068fcecb6937b867b270d.asciidoc b/docs/doc_examples/c5802e9f3f4068fcecb6937b867b270d.asciidoc new file mode 100644 index 000000000..a55b94c8b --- /dev/null +++ b/docs/doc_examples/c5802e9f3f4068fcecb6937b867b270d.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + terms: { + field: "genre", + order: { + _count: "asc", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc new file mode 100644 index 000000000..9de280733 --- /dev/null +++ b/docs/doc_examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkUpdateApiKeys({ + ids: ["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"], + role_descriptors: {}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc b/docs/doc_examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc new file mode 100644 index 000000000..18b3c84e7 --- /dev/null +++ b/docs/doc_examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", + id: 1, + fields: "message", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c5bc577ff92f889225b0d2617adcb48c.asciidoc b/docs/doc_examples/c5bc577ff92f889225b0d2617adcb48c.asciidoc new file mode 100644 index 000000000..9144b19a6 --- /dev/null +++ b/docs/doc_examples/c5bc577ff92f889225b0d2617adcb48c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "process", + filter_path: "**.max_file_descriptors", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c5cc19e48549fbc5327a9d46874bbeee.asciidoc b/docs/doc_examples/c5cc19e48549fbc5327a9d46874bbeee.asciidoc new file mode 100644 index 000000000..fc07f2f81 --- /dev/null +++ b/docs/doc_examples/c5cc19e48549fbc5327a9d46874bbeee.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "quantized-image-index", + knn: { + field: "image-vector", + query_vector: [0.1, -2], + k: 10, + num_candidates: 100, + }, + fields: ["title"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c5e5873783246c7b1c01d8464fed72c4.asciidoc b/docs/doc_examples/c5e5873783246c7b1c01d8464fed72c4.asciidoc deleted file mode 100644 index 2778add40..000000000 --- a/docs/doc_examples/c5e5873783246c7b1c01d8464fed72c4.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT 
EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.delete({ - index: 'twitter', - id: '1' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc b/docs/doc_examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc new file mode 100644 index 000000000..31134e4a0 --- /dev/null +++ b/docs/doc_examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-data-stream", + filter_path: "hits.hits._source", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c612d93e7f682a0d731e385edf9f5d56.asciidoc b/docs/doc_examples/c612d93e7f682a0d731e385edf9f5d56.asciidoc deleted file mode 100644 index 7abc351c7..000000000 --- a/docs/doc_examples/c612d93e7f682a0d731e385edf9f5d56.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - obj1: { - type: 'nested' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/c6151a0788a10a7f40da684d72c3255c.asciidoc b/docs/doc_examples/c6151a0788a10a7f40da684d72c3255c.asciidoc new file mode 100644 index 000000000..e865fbce5 --- /dev/null +++ b/docs/doc_examples/c6151a0788a10a7f40da684d72c3255c.asciidoc @@ -0,0 +1,55 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + { + title: "Something really urgent", + labels: { + priority: "urgent", + release: ["v1.2.5", "v1.3.0"], + timestamp: { + created: 1541458026, + closed: 1541457010, + }, + }, + }, + { + index: {}, + }, + { + title: "Somewhat less urgent", + labels: { + priority: "high", + release: ["v1.3.0"], + timestamp: { + created: 1541458026, + closed: 1541457010, + }, + }, + }, + { + index: {}, + }, + { + title: "Not urgent", + labels: { + priority: "low", + release: ["v1.2.0"], + timestamp: { + created: 1541458026, + closed: 1541457010, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c630a1f891aa9aa651f9982b832a42e1.asciidoc b/docs/doc_examples/c630a1f891aa9aa651f9982b832a42e1.asciidoc new file mode 100644 index 000000000..bb7f521f2 --- /dev/null +++ b/docs/doc_examples/c630a1f891aa9aa651f9982b832a42e1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + drop: { + description: "Drop documents that contain 'network.name' of 'Guest'", + if: "ctx.network?.name != null && ctx.network.name.contains('Guest')", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc b/docs/doc_examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc new file mode 100644 index 000000000..f24162ad8 --- /dev/null +++ b/docs/doc_examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc @@ -0,0 +1,16 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_1", + template: { + settings: { + number_of_shards: 1, + }, + }, + version: 123, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c639036b87d02fb864e27c4ca29ef833.asciidoc b/docs/doc_examples/c639036b87d02fb864e27c4ca29ef833.asciidoc new file mode 100644 index 000000000..8bd9633cf --- /dev/null +++ b/docs/doc_examples/c639036b87d02fb864e27c4ca29ef833.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stackoverflow", + size: 0, + query: { + query_string: { + query: "tags:kibana", + }, + }, + runtime_mappings: { + "tags.hash": { + type: "long", + script: "emit(doc['tags'].hashCode())", + }, + }, + aggs: { + my_unbiased_sample: { + diversified_sampler: { + shard_size: 200, + max_docs_per_value: 3, + field: "tags.hash", + }, + aggs: { + keywords: { + significant_terms: { + field: "tags", + exclude: ["kibana"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c64b61bedb21b9def8fce5092e677af9.asciidoc b/docs/doc_examples/c64b61bedb21b9def8fce5092e677af9.asciidoc new file mode 100644 index 000000000..4e2637039 --- /dev/null +++ b/docs/doc_examples/c64b61bedb21b9def8fce5092e677af9.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + suggest: { + "my-suggest-1": { + text: "tring out Elasticsearch", + term: { + field: "message", + }, + }, + "my-suggest-2": { + text: "kmichy", + term: { + field: "user.id", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c654b09be981be12fc7be0ba33f8652b.asciidoc b/docs/doc_examples/c654b09be981be12fc7be0ba33f8652b.asciidoc new file mode 100644 index 000000000..ed2a6cc50 --- /dev/null +++ b/docs/doc_examples/c654b09be981be12fc7be0ba33f8652b.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "multilinestring", + coordinates: [ + [ + [1002, 200], + [1003, 200], + [1003, 300], + [1002, 300], + ], + [ + [1000, 100], + [1001, 100], + [1001, 200], + [1000, 200], + ], + [ + [1000.2, 100.2], + [1000.8, 100.2], + [1000.8, 100.8], + [1000.2, 100.8], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc b/docs/doc_examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc new file mode 100644 index 000000000..236a07a74 --- /dev/null +++ b/docs/doc_examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.getTransform({ + size: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c66dab0b114fa3e228e1c0e0e5a99b60.asciidoc b/docs/doc_examples/c66dab0b114fa3e228e1c0e0e5a99b60.asciidoc new file mode 100644 index 000000000..4ba09eeaf --- /dev/null +++ 
b/docs/doc_examples/c66dab0b114fa3e228e1c0e0e5a99b60.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + fields: ["user.first"], + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc b/docs/doc_examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc new file mode 100644 index 000000000..3dc3bcdf4 --- /dev/null +++ b/docs/doc_examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match: { + message: "tring out Elasticsearch", + }, + }, + suggest: { + "my-suggestion": { + text: "tring out Elasticsearch", + term: { + field: "message", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6abe91b5527870face2b826f37ba1da.asciidoc b/docs/doc_examples/c6abe91b5527870face2b826f37ba1da.asciidoc new file mode 100644 index 000000000..926646068 --- /dev/null +++ b/docs/doc_examples/c6abe91b5527870face2b826f37ba1da.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "image-index", + query: { + match: { + title: { + query: "mountain lake", + boost: 0.9, + }, + }, + }, + knn: { + field: "image-vector", + query_vector: [54, 10, -2], + k: 5, + num_candidates: 50, + boost: 0.1, + }, + size: 10, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6b365c7da97d7e50f36820a7d36f548.asciidoc b/docs/doc_examples/c6b365c7da97d7e50f36820a7d36f548.asciidoc new file mode 100644 index 000000000..cfd0cc222 --- /dev/null +++ b/docs/doc_examples/c6b365c7da97d7e50f36820a7d36f548.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my_index,my_other_index", + settings: { + "index.number_of_replicas": 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc b/docs/doc_examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc new file mode 100644 index 000000000..788794cca --- /dev/null +++ b/docs/doc_examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deletePipeline({ + id: "pipeline-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6b8713bd49661d69d6b868f5b991d17.asciidoc b/docs/doc_examples/c6b8713bd49661d69d6b868f5b991d17.asciidoc new file mode 100644 index 000000000..7bdb10fe5 --- /dev/null +++ b/docs/doc_examples/c6b8713bd49661d69d6b868f5b991d17.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "job-candidates", + id: 1, + refresh: "true", + document: { + name: "Jane Smith", + programming_languages: ["c++", "java"], + 
required_matches: 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc b/docs/doc_examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc new file mode 100644 index 000000000..51e78e61c --- /dev/null +++ b/docs/doc_examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + user_id: { + type: "long", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6d39d22188dc7bbfdad811a94cbcc2b.asciidoc b/docs/doc_examples/c6d39d22188dc7bbfdad811a94cbcc2b.asciidoc new file mode 100644 index 000000000..7f81cd4b5 --- /dev/null +++ b/docs/doc_examples/c6d39d22188dc7bbfdad811a94cbcc2b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "classic", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c6d5e3b6ff9c665ec5344a4bfa7add80.asciidoc b/docs/doc_examples/c6d5e3b6ff9c665ec5344a4bfa7add80.asciidoc new file mode 100644 index 000000000..06f36c1fe --- /dev/null +++ b/docs/doc_examples/c6d5e3b6ff9c665ec5344a4bfa7add80.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "transport.tracer.include": "*", + "transport.tracer.exclude": "internal:coordination/fault_detection/*", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c733f20641b20e124f26198534755d6d.asciidoc b/docs/doc_examples/c733f20641b20e124f26198534755d6d.asciidoc new file mode 100644 index 000000000..4c0759c44 --- /dev/null +++ b/docs/doc_examples/c733f20641b20e124f26198534755d6d.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + aggs: { + "my-first-agg-name": { + terms: { + field: "my-field", + }, + }, + "my-second-agg-name": { + avg: { + field: "my-other-field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc b/docs/doc_examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc new file mode 100644 index 000000000..ec92cc77c --- /dev/null +++ b/docs/doc_examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "my_autoscaling_policy", + policy: { + roles: ["data_hot"], + deciders: { + fixed: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c76cb6a080959b0d87afd780cf814be2.asciidoc b/docs/doc_examples/c76cb6a080959b0d87afd780cf814be2.asciidoc new file mode 100644 index 000000000..6226ba7c2 --- /dev/null +++ b/docs/doc_examples/c76cb6a080959b0d87afd780cf814be2.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// 
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + should: [ + { + term: { + message: "quick", + }, + }, + { + term: { + message: "brown", + }, + }, + { + prefix: { + message: "f", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c793efe7280e9b6e09981c4d4f832348.asciidoc b/docs/doc_examples/c793efe7280e9b6e09981c4d4f832348.asciidoc new file mode 100644 index 000000000..ca162517a --- /dev/null +++ b/docs/doc_examples/c793efe7280e9b6e09981c4d4f832348.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + ip: { + type: "ip", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + ip: [ + "192.168.0.1", + "192.168.0.1", + "10.10.12.123", + "2001:db8::1:0:0:1", + "::afff:4567:890a", + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c79b284fa7a5d7421c6daae62bc697f9.asciidoc b/docs/doc_examples/c79b284fa7a5d7421c6daae62bc697f9.asciidoc new file mode 100644 index 000000000..2cf87b5b5 --- /dev/null +++ b/docs/doc_examples/c79b284fa7a5d7421c6daae62bc697f9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "kibana_sample_data_ecommerce", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c79e8ee86b332302b25c5c1f5f4f89d7.asciidoc b/docs/doc_examples/c79e8ee86b332302b25c5c1f5f4f89d7.asciidoc new file mode 100644 index 000000000..a8aabae5a --- /dev/null +++ b/docs/doc_examples/c79e8ee86b332302b25c5c1f5f4f89d7.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "dept_role", + indices: [ + { + names: ["*"], + privileges: ["read"], + query: { + term: { + department_id: 12, + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/c8210f23c10d0642f24c1e43faa4deda.asciidoc b/docs/doc_examples/c8210f23c10d0642f24c1e43faa4deda.asciidoc new file mode 100644 index 000000000..4de7de316 --- /dev/null +++ b/docs/doc_examples/c8210f23c10d0642f24c1e43faa4deda.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "my-mappings", + template: { + mappings: { + properties: { + "@timestamp": { + type: "date", + format: "date_optional_time||epoch_millis", + }, + message: { + type: "wildcard", + }, + }, + }, + }, + _meta: { + description: "Mappings for @timestamp and message fields", + "my-custom-meta-field": "More arbitrary metadata", + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "my-settings", + template: { + settings: { + "index.lifecycle.name": "my-lifecycle-policy", + }, + }, + _meta: { + description: "Settings for ILM", + "my-custom-meta-field": 
"More arbitrary metadata", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c849c6c8f8659dbb93e1c14356f74e37.asciidoc b/docs/doc_examples/c849c6c8f8659dbb93e1c14356f74e37.asciidoc deleted file mode 100644 index 3b868e8eb..000000000 --- a/docs/doc_examples/c849c6c8f8659dbb93e1c14356f74e37.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - city: { - type: 'text' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/c87038b96ab06d9a741a130f94de4f02.asciidoc b/docs/doc_examples/c87038b96ab06d9a741a130f94de4f02.asciidoc new file mode 100644 index 000000000..b3691d8b5 --- /dev/null +++ b/docs/doc_examples/c87038b96ab06d9a741a130f94de4f02.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.delete({ + index: "my-index-000001", + id: 1, + timeout: "5m", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c873f9cd093e26515148f052e28c7805.asciidoc b/docs/doc_examples/c873f9cd093e26515148f052e28c7805.asciidoc new file mode 100644 index 000000000..56f09eb18 --- /dev/null +++ b/docs/doc_examples/c873f9cd093e26515148f052e28c7805.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getModelSnapshots({ + job_id: "high_sum_total_sales", + start: 1575402236000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc b/docs/doc_examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc new file mode 100644 index 000000000..1bd714230 --- /dev/null +++ b/docs/doc_examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "amazon-reviews", + mappings: { + properties: { + review_vector: { + type: "dense_vector", + dims: 8, + index: true, + similarity: "cosine", + }, + review_text: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c8bbf362f06a0d8dab33ec0d99743343.asciidoc b/docs/doc_examples/c8bbf362f06a0d8dab33ec0d99743343.asciidoc new file mode 100644 index 000000000..333d095ab --- /dev/null +++ b/docs/doc_examples/c8bbf362f06a0d8dab33ec0d99743343.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "classic", + filter: ["classic"], + text: "The 2 Q.U.I.C.K. 
Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c8e2109b19d50467ab83a40006462e9f.asciidoc b/docs/doc_examples/c8e2109b19d50467ab83a40006462e9f.asciidoc new file mode 100644 index 000000000..e83d550e1 --- /dev/null +++ b/docs/doc_examples/c8e2109b19d50467ab83a40006462e9f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.executePolicy({ + name: "my-policy", + wait_for_completion: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c92b761c18d8e1c3df75c04a21503e16.asciidoc b/docs/doc_examples/c92b761c18d8e1c3df75c04a21503e16.asciidoc new file mode 100644 index 000000000..ae890cd27 --- /dev/null +++ b/docs/doc_examples/c92b761c18d8e1c3df75c04a21503e16.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "logs-my_app-settings", + template: { + settings: { + "index.default_pipeline": "logs-my_app-default", + "index.lifecycle.name": "logs", + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: "logs-my_app-template", + index_patterns: ["logs-my_app-*"], + data_stream: {}, + priority: 500, + composed_of: ["logs-my_app-settings", "logs-my_app-mappings"], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc b/docs/doc_examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc new file mode 100644 index 000000000..89b64a70b --- /dev/null +++ b/docs/doc_examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchTemplate({ + index: "my-index", + id: "my-search-template", + params: { + query_string: "hello world", + from: 0, + size: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c95d5317525c2ff625e6971c277247af.asciidoc b/docs/doc_examples/c95d5317525c2ff625e6971c277247af.asciidoc new file mode 100644 index 000000000..655ad864e --- /dev/null +++ b/docs/doc_examples/c95d5317525c2ff625e6971c277247af.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["lowercase"], + text: "john.SMITH@example.COM", +}); +console.log(response); +---- diff --git a/docs/doc_examples/c96669604d0e66a097ddf3093b025ccd.asciidoc b/docs/doc_examples/c96669604d0e66a097ddf3093b025ccd.asciidoc new file mode 100644 index 000000000..37d2b65d7 --- /dev/null +++ b/docs/doc_examples/c96669604d0e66a097ddf3093b025ccd.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + "my-agg-name": { + terms: { + field: "my-field", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c96e5740b79f703c5b77e3ddc9fdf3a0.asciidoc 
b/docs/doc_examples/c96e5740b79f703c5b77e3ddc9fdf3a0.asciidoc new file mode 100644 index 000000000..bbc28e291 --- /dev/null +++ b/docs/doc_examples/c96e5740b79f703c5b77e3ddc9fdf3a0.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "my-index-template", + index_patterns: ["my-data-stream*"], + data_stream: {}, + composed_of: ["my-mappings", "my-settings"], + priority: 500, + _meta: { + description: "Template for my time series data", + "my-custom-meta-field": "More arbitrary metadata", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc b/docs/doc_examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc new file mode 100644 index 000000000..673c7aac1 --- /dev/null +++ b/docs/doc_examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.getPolicy(); +console.log(response); +---- diff --git a/docs/doc_examples/c9a6ab0a56bb0177f158277185f68302.asciidoc b/docs/doc_examples/c9a6ab0a56bb0177f158277185f68302.asciidoc new file mode 100644 index 000000000..ad55541b1 --- /dev/null +++ b/docs/doc_examples/c9a6ab0a56bb0177f158277185f68302.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + metrics: { + type: "object", + subobjects: false, + properties: { + time: { + type: "long", + }, + "time.min": { + type: "long", + }, + "time.max": { + type: "long", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "metric_1", + document: { + "metrics.time": 100, + "metrics.time.min": 10, + "metrics.time.max": 900, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: "metric_2", + document: { + metrics: { + time: 100, + "time.min": 10, + "time.max": 900, + }, + }, +}); +console.log(response2); + +const response3 = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/c9afa715021f2e6450e72ac73271960c.asciidoc b/docs/doc_examples/c9afa715021f2e6450e72ac73271960c.asciidoc new file mode 100644 index 000000000..1d51cd87e --- /dev/null +++ b/docs/doc_examples/c9afa715021f2e6450e72ac73271960c.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "parent_example", + id: 1, + document: { + join: { + name: "question", + }, + body: "I have Windows 2003 server and i bought a new Windows 2008 server...", + title: + "Whats the best way to file transfer my site from server to a newer one?", + tags: ["windows-server-2003", "windows-server-2008", "file-transfer"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c9b6cbe93c8bd23e3f658c3af4e70092.asciidoc b/docs/doc_examples/c9b6cbe93c8bd23e3f658c3af4e70092.asciidoc new file mode 100644 index 000000000..4d40dcc30 --- /dev/null +++ 
b/docs/doc_examples/c9b6cbe93c8bd23e3f658c3af4e70092.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + autocomplete: { + tokenizer: "autocomplete", + filter: ["lowercase"], + }, + autocomplete_search: { + tokenizer: "lowercase", + }, + }, + tokenizer: { + autocomplete: { + type: "edge_ngram", + min_gram: 2, + max_gram: 10, + token_chars: ["letter"], + }, + }, + }, + }, + mappings: { + properties: { + title: { + type: "text", + analyzer: "autocomplete", + search_analyzer: "autocomplete_search", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + title: "Quick Foxes", + }, +}); +console.log(response1); + +const response2 = await client.indices.refresh({ + index: "my-index-000001", +}); +console.log(response2); + +const response3 = await client.search({ + index: "my-index-000001", + query: { + match: { + title: { + query: "Quick Fo", + operator: "and", + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc b/docs/doc_examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc new file mode 100644 index 000000000..819634ad8 --- /dev/null +++ b/docs/doc_examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc @@ -0,0 +1,54 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + dynamic_templates: [ + { + geo_point: { + mapping: { + type: "geo_point", + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.bulk({ + operations: [ + { + index: { + _index: "my_index", + _id: "1", + dynamic_templates: { + work_location: "geo_point", + }, + }, + }, + { + field: "value1", + work_location: "41.12,-71.34", + raw_location: "41.12,-71.34", + }, + { + create: { + _index: "my_index", + _id: "2", + dynamic_templates: { + home_location: "geo_point", + }, + }, + }, + { + field: "value2", + home_location: "41.12,-71.34", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc b/docs/doc_examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc new file mode 100644 index 000000000..d4f5c37d2 --- /dev/null +++ b/docs/doc_examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: { + source: + "if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'noop' }", + lang: "painless", + params: { + tag: "green", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/c9d9a1d751f20f6197c825cb4378fe9f.asciidoc b/docs/doc_examples/c9d9a1d751f20f6197c825cb4378fe9f.asciidoc new file mode 100644 index 000000000..52d6e5890 --- /dev/null +++ b/docs/doc_examples/c9d9a1d751f20f6197c825cb4378fe9f.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + terms: { 
+ "user.id": ["kimchy", "elkbee"], + boost: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca06db2aa4747910278f96315f7be94b.asciidoc b/docs/doc_examples/ca06db2aa4747910278f96315f7be94b.asciidoc new file mode 100644 index 000000000..4b2d56fb3 --- /dev/null +++ b/docs/doc_examples/ca06db2aa4747910278f96315f7be94b.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top: 40.73, + left: -74.1, + bottom: 40.01, + right: -71.12, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca08e511e5907d258081b10a1a9f0072.asciidoc b/docs/doc_examples/ca08e511e5907d258081b10a1a9f0072.asciidoc new file mode 100644 index 000000000..f8cc1818e --- /dev/null +++ b/docs/doc_examples/ca08e511e5907d258081b10a1a9f0072.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "new-data-stream-template", + index_patterns: ["new-data-stream*"], + data_stream: {}, + priority: 500, + template: { + mappings: { + properties: { + "@timestamp": { + type: "date_nanos", + }, + }, + }, + settings: { + "sort.field": ["@timestamp"], + "sort.order": ["desc"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca1cc4bcef22fdf9153833bfe6a55294.asciidoc b/docs/doc_examples/ca1cc4bcef22fdf9153833bfe6a55294.asciidoc new file mode 100644 index 000000000..ce21a8a26 --- /dev/null +++ b/docs/doc_examples/ca1cc4bcef22fdf9153833bfe6a55294.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + refresh: "true", + operations: [ + { + index: { + _index: ".ds-my-data-stream-2099.03.08-000003", + _id: "bfspvnIBr7VVZlfp2lqX", + if_seq_no: 0, + if_primary_term: 1, + }, + }, + { + "@timestamp": "2099-03-08T11:06:07.000Z", + user: { + id: "8a4f500d", + }, + message: "Login successful", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca3bcd6278510ebced5f74484033cb36.asciidoc b/docs/doc_examples/ca3bcd6278510ebced5f74484033cb36.asciidoc new file mode 100644 index 000000000..79282a804 --- /dev/null +++ b/docs/doc_examples/ca3bcd6278510ebced5f74484033cb36.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getScriptLanguages(); +console.log(response); +---- diff --git a/docs/doc_examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc b/docs/doc_examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc new file mode 100644 index 000000000..6fa3241c9 --- /dev/null +++ b/docs/doc_examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey(); +console.log(response); +---- diff --git a/docs/doc_examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc 
b/docs/doc_examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc new file mode 100644 index 000000000..6b66ad338 --- /dev/null +++ b/docs/doc_examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + sparse_vector: { + field: "ml.tokens", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ca98afbd6a90f63e02f62239d225313b.asciidoc b/docs/doc_examples/ca98afbd6a90f63e02f62239d225313b.asciidoc new file mode 100644 index 000000000..9f401ce83 --- /dev/null +++ b/docs/doc_examples/ca98afbd6a90f63e02f62239d225313b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.danglingIndices.importDanglingIndex({ + index_uuid: "zmM4e0JtBkeUjiHD-MihPQ", + accept_data_loss: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/caaafef1a76c2bec677704c2dc233218.asciidoc b/docs/doc_examples/caaafef1a76c2bec677704c2dc233218.asciidoc new file mode 100644 index 000000000..d19897921 --- /dev/null +++ b/docs/doc_examples/caaafef1a76c2bec677704c2dc233218.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.simulateIndexTemplate({ + name: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/caab99520d3fe41f6154d74a7f696057.asciidoc b/docs/doc_examples/caab99520d3fe41f6154d74a7f696057.asciidoc new file mode 100644 index 000000000..d4a98b60b --- /dev/null +++ b/docs/doc_examples/caab99520d3fe41f6154d74a7f696057.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cac74a85c6b352a6e23d8673abae126f.asciidoc b/docs/doc_examples/cac74a85c6b352a6e23d8673abae126f.asciidoc new file mode 100644 index 000000000..3741af149 --- /dev/null +++ b/docs/doc_examples/cac74a85c6b352a6e23d8673abae126f.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "kibana_sample_data_ecommerce", + size: 0, + aggs: { + my_agg: { + frequent_item_sets: { + minimum_set_size: 3, + fields: [ + { + field: "category.keyword", + }, + { + field: "geoip.city_name", + }, + ], + size: 3, + filter: { + term: { + "geoip.continent_name": "Europe", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc b/docs/doc_examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc new file mode 100644 index 000000000..366ef7e6a --- /dev/null +++ b/docs/doc_examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.split({ + index: "my-index-000001", + target: "split-my-index-000001", + settings: { + "index.number_of_shards": 2, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb01106bf524df5e0501d4c655c1aa7b.asciidoc b/docs/doc_examples/cb01106bf524df5e0501d4c655c1aa7b.asciidoc deleted file mode 100644 index ed57b73c3..000000000 --- a/docs/doc_examples/cb01106bf524df5e0501d4c655c1aa7b.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - slices: '5', - refresh: true, - body: { - source: { - index: 'twitter' - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc b/docs/doc_examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc new file mode 100644 index 000000000..62d570c80 --- /dev/null +++ b/docs/doc_examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + query: { + term: { + "user.id": "kimchy", + }, + }, + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb16f1ff85399ddaa418834be580c9de.asciidoc b/docs/doc_examples/cb16f1ff85399ddaa418834be580c9de.asciidoc new file mode 100644 index 000000000..15951883c --- /dev/null +++ b/docs/doc_examples/cb16f1ff85399ddaa418834be580c9de.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "slm-admin", + cluster: ["manage_slm", "cluster:admin/snapshot/*"], + indices: [ + { + names: [".slm-history-*"], + privileges: ["all"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb1d2a787bbe88974cfc5f132556a51c.asciidoc b/docs/doc_examples/cb1d2a787bbe88974cfc5f132556a51c.asciidoc new file mode 100644 index 000000000..9d4169eb7 --- /dev/null +++ b/docs/doc_examples/cb1d2a787bbe88974cfc5f132556a51c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteDataStream({ + name: "*", + expand_wildcards: "all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc b/docs/doc_examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc new file mode 100644 index 000000000..43c87dda3 --- /dev/null +++ b/docs/doc_examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + index: "my-index-000001,my-index-000002", + request: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb3c483816b6ea150ff6c559fa144d32.asciidoc b/docs/doc_examples/cb3c483816b6ea150ff6c559fa144d32.asciidoc new file mode 100644 index 000000000..93090861b --- /dev/null +++ b/docs/doc_examples/cb3c483816b6ea150ff6c559fa144d32.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO 
NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "timeseries_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_primary_shard_size: "50GB", + max_age: "30d", + }, + }, + }, + delete: { + min_age: "90d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb4388b72d41c431ec9ca8255b2f65fb.asciidoc b/docs/doc_examples/cb4388b72d41c431ec9ca8255b2f65fb.asciidoc new file mode 100644 index 000000000..4c0f31631 --- /dev/null +++ b/docs/doc_examples/cb4388b72d41c431ec9ca8255b2f65fb.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + geometry: { + type: "shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "example", + id: 1, + refresh: "wait_for", + document: { + name: "Lucky Landing", + geometry: { + type: "point", + coordinates: [1355.400544, 5255.530286], + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc b/docs/doc_examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc new file mode 100644 index 000000000..dfa1e1323 --- /dev/null +++ b/docs/doc_examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.master({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cb71c6ecfb8b19725c374572444e5d32.asciidoc b/docs/doc_examples/cb71c6ecfb8b19725c374572444e5d32.asciidoc new file mode 100644 index 000000000..ca22d0ffd --- /dev/null +++ b/docs/doc_examples/cb71c6ecfb8b19725c374572444e5d32.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + aggs: { + avg_start: { + avg: { + field: "measures.start", + }, + }, + avg_end: { + avg: { + field: "measures.end", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cba3462a307e2483c14e3e198f6960e3.asciidoc b/docs/doc_examples/cba3462a307e2483c14e3e198f6960e3.asciidoc new file mode 100644 index 000000000..70b810179 --- /dev/null +++ b/docs/doc_examples/cba3462a307e2483c14e3e198f6960e3.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + _meta: { + description: "used for nginx log", + project: { + name: "myProject", + department: "myDepartment", + }, + }, + phases: { + warm: { + min_age: "10d", + actions: { + forcemerge: { + max_num_segments: 1, + }, + }, + }, + delete: { + min_age: "30d", + actions: { + delete: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cbc2b5595890f87165aab1a741b1d22c.asciidoc b/docs/doc_examples/cbc2b5595890f87165aab1a741b1d22c.asciidoc new file mode 100644 index 000000000..f58997869 --- /dev/null +++ 
b/docs/doc_examples/cbc2b5595890f87165aab1a741b1d22c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-timestamp-pipeline", + description: "Shifts the @timestamp to the last 15 minutes", + processors: [ + { + set: { + field: "ingest_time", + value: "{{_ingest.timestamp}}", + }, + }, + { + script: { + lang: "painless", + source: + '\n def delta = ChronoUnit.SECONDS.between(\n ZonedDateTime.parse("2022-06-21T15:49:00Z"),\n ZonedDateTime.parse(ctx["ingest_time"])\n );\n ctx["@timestamp"] = ZonedDateTime.parse(ctx["@timestamp"]).plus(delta,ChronoUnit.SECONDS).toString();\n ', + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc b/docs/doc_examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc new file mode 100644 index 000000000..310baf041 --- /dev/null +++ b/docs/doc_examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.templates({ + v: "true", + s: "order:desc,index_patterns", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cc0cca5556ec6224c7134c233734beed.asciidoc b/docs/doc_examples/cc0cca5556ec6224c7134c233734beed.asciidoc new file mode 100644 index 000000000..0985386a9 --- /dev/null +++ b/docs/doc_examples/cc0cca5556ec6224c7134c233734beed.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.remoteInfo(); +console.log(response); +---- diff --git a/docs/doc_examples/cc56be758d5d75febbd975786187c861.asciidoc b/docs/doc_examples/cc56be758d5d75febbd975786187c861.asciidoc new file mode 100644 index 000000000..b453c2cd4 --- /dev/null +++ b/docs/doc_examples/cc56be758d5d75febbd975786187c861.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createServiceToken({ + namespace: "elastic", + service: "fleet-server", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc b/docs/doc_examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc new file mode 100644 index 000000000..b9bbc5172 --- /dev/null +++ b/docs/doc_examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "mv", + mappings: { + properties: { + b: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "mv", + refresh: "true", + operations: [ + { + index: {}, + }, + { + a: 1, + b: ["foo", "foo", "bar"], + }, + { + index: {}, + }, + { + a: 2, + b: ["bar", "bar"], + }, + ], +}); +console.log(response1); + +const response2 = await client.esql.query({ + query: "FROM mv | LIMIT 2", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/cc7f1c74ede6810e2c9db19256d6b653.asciidoc b/docs/doc_examples/cc7f1c74ede6810e2c9db19256d6b653.asciidoc new file mode 100644 
index 000000000..d193cb365 --- /dev/null +++ b/docs/doc_examples/cc7f1c74ede6810e2c9db19256d6b653.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + match: { + "http.response": "304", + }, + }, + fields: ["http.response"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc b/docs/doc_examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc new file mode 100644 index 000000000..c5bd1a7ef --- /dev/null +++ b/docs/doc_examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteTrainedModel({ + model_id: "regression-job-one-1574775307356", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc b/docs/doc_examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc new file mode 100644 index 000000000..b90fcd491 --- /dev/null +++ b/docs/doc_examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "snapshot_2", + wait_for_completion: "true", + indices: "index_1,index_2", + ignore_unavailable: true, + include_global_state: false, + rename_pattern: "index_(.+)", + rename_replacement: "restored_index_$1", + include_aliases: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc new file mode 100644 index 000000000..e1e69507e --- /dev/null +++ b/docs/doc_examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc @@ -0,0 +1,70 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.simulate.ingest({ + docs: [ + { + _index: "my-index", + _id: "id", + _source: { + foo: "bar", + }, + }, + { + _index: "my-index", + _id: "id", + _source: { + foo: "rab", + }, + }, + ], + pipeline_substitutions: { + "my-pipeline": { + processors: [ + { + set: { + field: "field3", + value: "value3", + }, + }, + ], + }, + }, + component_template_substitutions: { + "my-component-template": { + template: { + mappings: { + dynamic: "true", + properties: { + field3: { + type: "keyword", + }, + }, + }, + settings: { + index: { + default_pipeline: "my-pipeline", + }, + }, + }, + }, + }, + index_template_substitutions: { + "my-index-template": { + index_patterns: ["my-index-*"], + composed_of: ["component_template_1", "component_template_2"], + }, + }, + mapping_addition: { + dynamic: "strict", + properties: { + foo: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc b/docs/doc_examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc new file mode 100644 index 000000000..a606b3cd3 --- /dev/null +++ b/docs/doc_examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + 
+[source, js] +---- +const response = await client.ml.deleteJob({ + job_id: "total-requests", + wait_for_completion: "false", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ccf84c1e5e5602a9e841cb8f7e3bb29f.asciidoc b/docs/doc_examples/ccf84c1e5e5602a9e841cb8f7e3bb29f.asciidoc new file mode 100644 index 000000000..1bd50f8c4 --- /dev/null +++ b/docs/doc_examples/ccf84c1e5e5602a9e841cb8f7e3bb29f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "standard_example", + settings: { + analysis: { + analyzer: { + rebuilt_standard: { + tokenizer: "standard", + filter: ["lowercase"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd16538654e0f834ff19fe6cf329c398.asciidoc b/docs/doc_examples/cd16538654e0f834ff19fe6cf329c398.asciidoc new file mode 100644 index 000000000..2b2407cee --- /dev/null +++ b/docs/doc_examples/cd16538654e0f834ff19fe6cf329c398.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "hugging-face-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 768, + element_type: "float", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd247f267968aa0927bfdad56852f8f5.asciidoc b/docs/doc_examples/cd247f267968aa0927bfdad56852f8f5.asciidoc deleted file mode 100644 index a9e7678d6..000000000 --- a/docs/doc_examples/cd247f267968aa0927bfdad56852f8f5.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - query: { - match: { - address: 'mill lane' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cd373a6eb1ef4748616500b26fab3006.asciidoc b/docs/doc_examples/cd373a6eb1ef4748616500b26fab3006.asciidoc new file mode 100644 index 000000000..68187df30 --- /dev/null +++ b/docs/doc_examples/cd373a6eb1ef4748616500b26fab3006.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.submit({ + index: "sales*", + size: 0, + sort: [ + { + date: { + order: "asc", + }, + }, + ], + aggs: { + sale_date: { + date_histogram: { + field: "date", + calendar_interval: "1d", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc b/docs/doc_examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc new file mode 100644 index 000000000..26a5401c2 --- /dev/null +++ b/docs/doc_examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_with_2_shards", + template: { + settings: { + "index.number_of_shards": 2, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "template_with_3_shards", + 
template: { + settings: { + "index.number_of_shards": 3, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.putIndexTemplate({ + name: "template_1", + index_patterns: ["t*"], + composed_of: ["template_with_2_shards", "template_with_3_shards"], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/cd5bc5bf7cd58d7b1492c9c298b345f6.asciidoc b/docs/doc_examples/cd5bc5bf7cd58d7b1492c9c298b345f6.asciidoc deleted file mode 100644 index 7646900a9..000000000 --- a/docs/doc_examples/cd5bc5bf7cd58d7b1492c9c298b345f6.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - actors: { - terms: { - field: 'actors', - size: 10, - collect_mode: 'breadth_first' - }, - aggs: { - costars: { - terms: { - field: 'actors', - size: 5 - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc b/docs/doc_examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc new file mode 100644 index 000000000..14d8971a1 --- /dev/null +++ b/docs/doc_examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "my-data-stream", + data_retention: "7d", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd6eee201a233b989ac1f2794fa6d640.asciidoc b/docs/doc_examples/cd6eee201a233b989ac1f2794fa6d640.asciidoc new file mode 100644 index 000000000..d6447e49e --- /dev/null +++ b/docs/doc_examples/cd6eee201a233b989ac1f2794fa6d640.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + filter_path: "-hits.events._source", + runtime_mappings: { + day_of_week: { + type: "keyword", + script: "emit(doc['@timestamp'].value.dayOfWeekEnum.toString())", + }, + }, + query: '\n process where process.name == "regsvr32.exe"\n ', + fields: ["@timestamp", "day_of_week"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd6fa7f63c93bb04824acd3a7d1f8de3.asciidoc b/docs/doc_examples/cd6fa7f63c93bb04824acd3a7d1f8de3.asciidoc new file mode 100644 index 000000000..0cd77f88f --- /dev/null +++ b/docs/doc_examples/cd6fa7f63c93bb04824acd3a7d1f8de3.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_not: { + include: { + span_term: { + field1: "hoya", + }, + }, + exclude: { + span_near: { + clauses: [ + { + span_term: { + field1: "la", + }, + }, + { + span_term: { + field1: "hoya", + }, + }, + ], + slop: 0, + in_order: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd7da0c3769682f546cc1888e569382e.asciidoc b/docs/doc_examples/cd7da0c3769682f546cc1888e569382e.asciidoc new file mode 100644 index 000000000..7505626c3 --- /dev/null +++ b/docs/doc_examples/cd7da0c3769682f546cc1888e569382e.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match_phrase: { + message: "number 1", + }, + }, + highlight: { + fields: { + message: { + type: "plain", + fragment_size: 15, + number_of_fragments: 3, + fragmenter: "span", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc b/docs/doc_examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc new file mode 100644 index 000000000..329ea0d48 --- /dev/null +++ b/docs/doc_examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getPrivileges({ + application: "myapp", + name: "read", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cd93919e13f656ad2e6629f45c579b93.asciidoc b/docs/doc_examples/cd93919e13f656ad2e6629f45c579b93.asciidoc new file mode 100644 index 000000000..85109f792 --- /dev/null +++ b/docs/doc_examples/cd93919e13f656ad2e6629f45c579b93.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.shardStores({ + index: "test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cda045dfd79acd160ed8668f2ee17ea7.asciidoc b/docs/doc_examples/cda045dfd79acd160ed8668f2ee17ea7.asciidoc new file mode 100644 index 000000000..393db177b --- /dev/null +++ b/docs/doc_examples/cda045dfd79acd160ed8668f2ee17ea7.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + pretty: "true", + query: { + match: { + full_text: "Quick Brown Foxes!", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc b/docs/doc_examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc new file mode 100644 index 000000000..75ecf1530 --- /dev/null +++ b/docs/doc_examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-new-index-000001", + mappings: { + properties: { + user_id: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc b/docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc new file mode 100644 index 000000000..ee14809f6 --- /dev/null +++ b/docs/doc_examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + all_of: { + ordered: true, + max_gaps: 1, + intervals: [ + { + match: { + query: "my favorite food", + max_gaps: 0, + ordered: true, + }, + }, + { + match: { + query: "cold porridge", + max_gaps: 4, + ordered: true, + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdc04e6d3d37f036c7045ee4a582ef06.asciidoc 
b/docs/doc_examples/cdc04e6d3d37f036c7045ee4a582ef06.asciidoc new file mode 100644 index 000000000..db4342fe6 --- /dev/null +++ b/docs/doc_examples/cdc04e6d3d37f036c7045ee4a582ef06.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + strings_as_keywords: { + match_mapping_type: "string", + mapping: { + type: "text", + norms: false, + fields: { + keyword: { + type: "keyword", + ignore_above: 256, + }, + }, + }, + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdc38c98320a0df705ec8d173c725375.asciidoc b/docs/doc_examples/cdc38c98320a0df705ec8d173c725375.asciidoc new file mode 100644 index 000000000..b17259566 --- /dev/null +++ b/docs/doc_examples/cdc38c98320a0df705ec8d173c725375.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + size: 0, + aggs: { + grouped: { + geohex_grid: { + field: "location", + precision: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc b/docs/doc_examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc new file mode 100644 index 000000000..1eabbfa9a --- /dev/null +++ b/docs/doc_examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdd29b01e730b3996de68a2788050021.asciidoc b/docs/doc_examples/cdd29b01e730b3996de68a2788050021.asciidoc new file mode 100644 index 000000000..f1425e5c3 --- /dev/null +++ b/docs/doc_examples/cdd29b01e730b3996de68a2788050021.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.deletePolicy({ + name: "my-policy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdd7127681254f4d614cc075f9e6fbcf.asciidoc b/docs/doc_examples/cdd7127681254f4d614cc075f9e6fbcf.asciidoc new file mode 100644 index 000000000..8f3de891a --- /dev/null +++ b/docs/doc_examples/cdd7127681254f4d614cc075f9e6fbcf.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + query: { + term: { + "user.id": "kimchy", + }, + }, + max_docs: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cde19d110a58317610033ea3dcb0eb80.asciidoc b/docs/doc_examples/cde19d110a58317610033ea3dcb0eb80.asciidoc new file mode 100644 index 000000000..9d5aacd3a --- /dev/null +++ b/docs/doc_examples/cde19d110a58317610033ea3dcb0eb80.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: + '\n {\n "query": {\n "match": 
{\n {{#query_message}}\n {{#query_string}}\n "message": "Hello {{#first_name_section}}{{first_name}}{{/first_name_section}} {{#last_name_section}}{{last_name}}{{/last_name_section}}"\n {{/query_string}}\n {{/query_message}}\n }\n }\n }\n ', + params: { + query_message: { + query_string: { + first_name_section: { + first_name: "John", + }, + last_name_section: { + last_name: "kimchy", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cde4104a29dfe942d55863cdd8718627.asciidoc b/docs/doc_examples/cde4104a29dfe942d55863cdd8718627.asciidoc new file mode 100644 index 000000000..baab28e94 --- /dev/null +++ b/docs/doc_examples/cde4104a29dfe942d55863cdd8718627.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/cdedd5f33f7e5f7acde561e97bff61de.asciidoc b/docs/doc_examples/cdedd5f33f7e5f7acde561e97bff61de.asciidoc deleted file mode 100644 index 29285ab2b..000000000 --- a/docs/doc_examples/cdedd5f33f7e5f7acde561e97bff61de.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - pretty: true, - body: { - query: { - term: { - full_text: 'Quick Brown Foxes!' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc b/docs/doc_examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc new file mode 100644 index 000000000..044a77056 --- /dev/null +++ b/docs/doc_examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: { + source: "ctx._source.tags.add(params.tag)", + lang: "painless", + params: { + tag: "blue", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc b/docs/doc_examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc new file mode 100644 index 000000000..235c96478 --- /dev/null +++ b/docs/doc_examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + settings: { + "index.number_of_replicas": 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce0a1aba713b0448b0c6a504af7b3a08.asciidoc b/docs/doc_examples/ce0a1aba713b0448b0c6a504af7b3a08.asciidoc new file mode 100644 index 000000000..7e7984e2e --- /dev/null +++ b/docs/doc_examples/ce0a1aba713b0448b0c6a504af7b3a08.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getStats(); +console.log(response); +---- diff --git a/docs/doc_examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc b/docs/doc_examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc new file mode 100644 index 000000000..11032c855 --- /dev/null +++ b/docs/doc_examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc @@ -0,0 +1,10 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce247fc08371e1b30cb52195e521c076.asciidoc b/docs/doc_examples/ce247fc08371e1b30cb52195e521c076.asciidoc new file mode 100644 index 000000000..da117c77f --- /dev/null +++ b/docs/doc_examples/ce247fc08371e1b30cb52195e521c076.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: [-74.1, 40.73], + bottom_right: [-71.12, 40.01], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc b/docs/doc_examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc new file mode 100644 index 000000000..b59f93b68 --- /dev/null +++ b/docs/doc_examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + text_embedding: { + type: "dense_vector", + dims: 384, + index_options: { + type: "int4_hnsw", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc b/docs/doc_examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc new file mode 100644 index 000000000..b9f5d4012 --- /dev/null +++ b/docs/doc_examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "loganalytics", + description: "Outlier detection on log data", + source: { + index: "logdata", + }, + dest: { + index: "logdata_out", + }, + analysis: { + outlier_detection: { + compute_feature_influence: true, + outlier_fraction: 0.05, + standardization_enabled: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce725697f93b3eebb3a266314568565a.asciidoc b/docs/doc_examples/ce725697f93b3eebb3a266314568565a.asciidoc new file mode 100644 index 000000000..c47955a54 --- /dev/null +++ b/docs/doc_examples/ce725697f93b3eebb3a266314568565a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "fingerprint_example", + settings: { + analysis: { + analyzer: { + rebuilt_fingerprint: { + tokenizer: "standard", + filter: ["lowercase", "asciifolding", "fingerprint"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc b/docs/doc_examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc new file mode 100644 index 000000000..d5a91c659 --- /dev/null +++ b/docs/doc_examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.watcher.queryWatches(); +console.log(response); +---- diff --git a/docs/doc_examples/ce899fcf55da72fc32e623d1ad88b301.asciidoc b/docs/doc_examples/ce899fcf55da72fc32e623d1ad88b301.asciidoc new file mode 100644 index 000000000..9a217bfec --- /dev/null +++ b/docs/doc_examples/ce899fcf55da72fc32e623d1ad88b301.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "logs-foo_component2", + template: { + mappings: { + properties: { + "host.ip": { + type: "ip", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ce8eebfb810335803630abe83278bee7.asciidoc b/docs/doc_examples/ce8eebfb810335803630abe83278bee7.asciidoc new file mode 100644 index 000000000..2ae4c207b --- /dev/null +++ b/docs/doc_examples/ce8eebfb810335803630abe83278bee7.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + active_only: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc b/docs/doc_examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc new file mode 100644 index 000000000..bae03c0ff --- /dev/null +++ b/docs/doc_examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information from arrays", + processors: [ + { + foreach: { + field: "attachments", + processor: { + attachment: { + target_field: "_ingest._value.attachment", + field: "_ingest._value.data", + remove_binary: true, + }, + }, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "attachment", + document: { + attachments: [ + { + filename: "ipsum.txt", + data: "dGhpcyBpcwpqdXN0IHNvbWUgdGV4dAo=", + }, + { + filename: "test.txt", + data: "VGhpcyBpcyBhIHRlc3QK", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc b/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc new file mode 100644 index 000000000..c36f080a5 --- /dev/null +++ b/docs/doc_examples/cedb56a71cc743d80263ce352bb21720.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "my-elser-model", + inference_config: { + service: "elser", + service_settings: { + num_allocations: 1, + num_threads: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc b/docs/doc_examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc new file mode 100644 index 000000000..6a20452e4 --- /dev/null +++ b/docs/doc_examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT 
+// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "model2", + docs: [ + { + text_field: + "The Amazon rainforest covers most of the Amazon basin in South America", + }, + ], + inference_config: { + ner: { + tokenization: { + bert: { + truncate: "first", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc b/docs/doc_examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc new file mode 100644 index 000000000..0a7361e50 --- /dev/null +++ b/docs/doc_examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getToken({ + grant_type: "client_credentials", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc b/docs/doc_examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc new file mode 100644 index 000000000..35fad32dd --- /dev/null +++ b/docs/doc_examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + "routing.allocation.total_shards_per_node": 5, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf47cd4a39cd62a3ecad919e54a67bca.asciidoc b/docs/doc_examples/cf47cd4a39cd62a3ecad919e54a67bca.asciidoc new file mode 100644 index 000000000..455ef41c7 --- /dev/null +++ b/docs/doc_examples/cf47cd4a39cd62a3ecad919e54a67bca.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + term: { + _ignored: "@timestamp", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf5dab4334783ca9b8942eab68fb7174.asciidoc b/docs/doc_examples/cf5dab4334783ca9b8942eab68fb7174.asciidoc new file mode 100644 index 000000000..51fa92abb --- /dev/null +++ b/docs/doc_examples/cf5dab4334783ca9b8942eab68fb7174.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + size: 0, + query: { + match: { + name: "led tv", + }, + }, + aggs: { + resellers: { + nested: { + path: "resellers", + }, + aggs: { + filter_reseller: { + filter: { + bool: { + filter: [ + { + term: { + "resellers.reseller": "companyB", + }, + }, + ], + }, + }, + aggs: { + min_price: { + min: { + field: "resellers.price", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf75a880c749a2f2010a8ec3f348e5c3.asciidoc b/docs/doc_examples/cf75a880c749a2f2010a8ec3f348e5c3.asciidoc new file mode 100644 index 000000000..379cc61c9 --- /dev/null +++ b/docs/doc_examples/cf75a880c749a2f2010a8ec3f348e5c3.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + keep_on_completion: 
true, + wait_for_completion_timeout: "2s", + query: '\n process where process.name == "cmd.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc b/docs/doc_examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc new file mode 100644 index 000000000..ec92536ff --- /dev/null +++ b/docs/doc_examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_internal/desired_nodes/_latest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cf9f51d719a2e90ffe36ed6fe56a4a69.asciidoc b/docs/doc_examples/cf9f51d719a2e90ffe36ed6fe56a4a69.asciidoc new file mode 100644 index 000000000..24bc313b4 --- /dev/null +++ b/docs/doc_examples/cf9f51d719a2e90ffe36ed6fe56a4a69.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-replication", + cluster: ["manage_ccr"], + indices: [ + { + names: ["follower-index-name"], + privileges: ["monitor", "read", "write", "manage_follow_index"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc b/docs/doc_examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc new file mode 100644 index 000000000..24848abdd --- /dev/null +++ b/docs/doc_examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + owner: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/cfbaea6f0df045c5d940bbb6a9c69cd8.asciidoc b/docs/doc_examples/cfbaea6f0df045c5d940bbb6a9c69cd8.asciidoc deleted file mode 100644 index e21d4bca8..000000000 --- a/docs/doc_examples/cfbaea6f0df045c5d940bbb6a9c69cd8.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - size: 0, - aggs: { - group_by_state: { - terms: { - field: 'state.keyword' - }, - aggs: { - average_balance: { - avg: { - field: 'balance' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cfc37446bd892d1ac42a3c8e8b204e6c.asciidoc b/docs/doc_examples/cfc37446bd892d1ac42a3c8e8b204e6c.asciidoc deleted file mode 100644 index b7d494527..000000000 --- a/docs/doc_examples/cfc37446bd892d1ac42a3c8e8b204e6c.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'test2', - id: '1' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/cfd4b34f35e531a20739a3b308d57134.asciidoc b/docs/doc_examples/cfd4b34f35e531a20739a3b308d57134.asciidoc new file mode 100644 index 000000000..b7d51f35c --- /dev/null +++ b/docs/doc_examples/cfd4b34f35e531a20739a3b308d57134.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the 
docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_docs: 100000000, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/cffce059425d3d21e7f9571500d63524.asciidoc b/docs/doc_examples/cffce059425d3d21e7f9571500d63524.asciidoc new file mode 100644 index 000000000..341274e75 --- /dev/null +++ b/docs/doc_examples/cffce059425d3d21e7f9571500d63524.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.deleteRole({ + name: "my_admin_role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d003ee256d24aa6000bd9dbf1d608dc5.asciidoc b/docs/doc_examples/d003ee256d24aa6000bd9dbf1d608dc5.asciidoc new file mode 100644 index 000000000..7af2430a5 --- /dev/null +++ b/docs/doc_examples/d003ee256d24aa6000bd9dbf1d608dc5.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "elser-v2-test", + processors: [ + { + inference: { + model_id: ".elser_model_2", + input_output: [ + { + input_field: "content", + output_field: "content_embedding", + }, + ], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d003f9110e5a474230abe11f36da9297.asciidoc b/docs/doc_examples/d003f9110e5a474230abe11f36da9297.asciidoc new file mode 100644 index 000000000..3684f13dc --- /dev/null +++ b/docs/doc_examples/d003f9110e5a474230abe11f36da9297.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + description: "Hide my IP", + processors: [ + { + redact: { + field: "message", + patterns: ["%{IP:client}"], + }, + }, + ], + }, + docs: [ + { + _source: { + message: "55.3.244.1 GET /index.html 15824 0.043", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc b/docs/doc_examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc new file mode 100644 index 000000000..597ee3209 --- /dev/null +++ b/docs/doc_examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "ct1", + template: { + settings: { + "index.number_of_shards": 2, + }, + }, +}); +console.log(response); + +const response1 = await client.cluster.putComponentTemplate({ + name: "ct2", + template: { + settings: { + "index.number_of_replicas": 0, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + }, + }, + }, +}); +console.log(response1); + +const response2 = await client.indices.putIndexTemplate({ + name: "final-template", + index_patterns: ["my-index-*"], + composed_of: ["ct1", "ct2"], + priority: 5, +}); +console.log(response2); + +const response3 = await client.indices.simulateIndexTemplate({ + name: "my-index-000001", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc 
b/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc new file mode 100644 index 000000000..78a5a38bc --- /dev/null +++ b/docs/doc_examples/d03139a851888db53f8b7affd85eb495.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.checkIn({ + connector_id: "my-connector", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d0378fe5e3aad05a2fd2e6e81213374f.asciidoc b/docs/doc_examples/d0378fe5e3aad05a2fd2e6e81213374f.asciidoc new file mode 100644 index 000000000..1b2d7df64 --- /dev/null +++ b/docs/doc_examples/d0378fe5e3aad05a2fd2e6e81213374f.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "bulgarian_example", + settings: { + analysis: { + filter: { + bulgarian_stop: { + type: "stop", + stopwords: "_bulgarian_", + }, + bulgarian_keywords: { + type: "keyword_marker", + keywords: ["пример"], + }, + bulgarian_stemmer: { + type: "stemmer", + language: "bulgarian", + }, + }, + analyzer: { + rebuilt_bulgarian: { + tokenizer: "standard", + filter: [ + "lowercase", + "bulgarian_stop", + "bulgarian_keywords", + "bulgarian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d03b0e2f0f3f5ac8d53287c445007a89.asciidoc b/docs/doc_examples/d03b0e2f0f3f5ac8d53287c445007a89.asciidoc new file mode 100644 index 000000000..fac12de59 --- /dev/null +++ b/docs/doc_examples/d03b0e2f0f3f5ac8d53287c445007a89.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + default_field: { + type: "text", + }, + boolean_sim_field: { + type: "text", + similarity: "boolean", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc b/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc new file mode 100644 index 000000000..23de5f5d8 --- /dev/null +++ b/docs/doc_examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc @@ -0,0 +1,66 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + operations: [ + { + index: { + _index: "books", + }, + }, + { + name: "Revelation Space", + author: "Alastair Reynolds", + release_date: "2000-03-15", + page_count: 585, + }, + { + index: { + _index: "books", + }, + }, + { + name: "1984", + author: "George Orwell", + release_date: "1985-06-01", + page_count: 328, + }, + { + index: { + _index: "books", + }, + }, + { + name: "Fahrenheit 451", + author: "Ray Bradbury", + release_date: "1953-10-15", + page_count: 227, + }, + { + index: { + _index: "books", + }, + }, + { + name: "Brave New World", + author: "Aldous Huxley", + release_date: "1932-06-01", + page_count: 268, + }, + { + index: { + _index: "books", + }, + }, + { + name: "The Handmaids Tale", + author: "Margaret Atwood", + release_date: "1985-06-01", + page_count: 311, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d050c6fa7d806457a5f32d30b07e9521.asciidoc 
b/docs/doc_examples/d050c6fa7d806457a5f32d30b07e9521.asciidoc new file mode 100644 index 000000000..cd4a3d736 --- /dev/null +++ b/docs/doc_examples/d050c6fa7d806457a5f32d30b07e9521.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + dot_expander: { + description: "Expand 'my-object-field.my-property'", + field: "my-object-field.my-property", + }, + }, + { + set: { + description: "Set 'my-object-field.my-property' to 10", + field: "my-object-field.my-property", + value: 10, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d0546f047359b85a7e98207dc8de896a.asciidoc b/docs/doc_examples/d0546f047359b85a7e98207dc8de896a.asciidoc new file mode 100644 index 000000000..3375c612b --- /dev/null +++ b/docs/doc_examples/d0546f047359b85a7e98207dc8de896a.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.mapping.coerce": false, + }, + mappings: { + properties: { + number_one: { + type: "integer", + coerce: true, + }, + number_two: { + type: "integer", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + number_one: "10", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + number_two: "10", + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d05b2a37106fce0ebbd41e2fd6bd26c2.asciidoc b/docs/doc_examples/d05b2a37106fce0ebbd41e2fd6bd26c2.asciidoc new file mode 100644 index 000000000..e865f4e45 --- /dev/null +++ b/docs/doc_examples/d05b2a37106fce0ebbd41e2fd6bd26c2.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "metrics_index", + mappings: { + properties: { + latency_histo: { + type: "histogram", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 1, + refresh: "true", + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "metrics_index", + id: 2, + refresh: "true", + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response2); + +const response3 = await client.search({ + index: "metrics_index", + size: 0, + filter_path: "aggregations", + aggs: { + min_latency: { + min: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/d06a649bc38aa9a6433b64efa78d8cb5.asciidoc b/docs/doc_examples/d06a649bc38aa9a6433b64efa78d8cb5.asciidoc new file mode 100644 index 000000000..380d5aa17 --- /dev/null +++ b/docs/doc_examples/d06a649bc38aa9a6433b64efa78d8cb5.asciidoc @@ -0,0 +1,68 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.bulk({ + index: "my-index", + refresh: "true", + operations: [ + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:17-05:00", + message: + '40.135.0.0 - - [30/Apr/2020:14:30:17 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:30:53-05:00", + message: + '232.0.0.0 - - [30/Apr/2020:14:30:53 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:12-05:00", + message: + '26.1.0.0 - - [30/Apr/2020:14:31:12 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:19-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:19 -0500] "GET /french/splash_inet.html HTTP/1.0" 200 3781', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:22-05:00", + message: + '247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] "GET /images/hm_nbg.jpg HTTP/1.0" 304 0', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:27-05:00", + message: + '252.0.0.0 - - [30/Apr/2020:14:31:27 -0500] "GET /images/hm_bg.jpg HTTP/1.0" 200 24736', + }, + { + index: {}, + }, + { + timestamp: "2020-04-30T14:31:28-05:00", + message: "not a valid apache log", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d095b422d9803c02b62c01adffc85376.asciidoc b/docs/doc_examples/d095b422d9803c02b62c01adffc85376.asciidoc new file mode 100644 index 000000000..5177e0ab0 --- /dev/null +++ b/docs/doc_examples/d095b422d9803c02b62c01adffc85376.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getJobs({ + id: "sensor", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d0a8a938a2fa913b6fdbc871079a59dd.asciidoc b/docs/doc_examples/d0a8a938a2fa913b6fdbc871079a59dd.asciidoc deleted file mode 100644 index 62a4d9455..000000000 --- a/docs/doc_examples/d0a8a938a2fa913b6fdbc871079a59dd.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - term: { - user: { - value: 'Kimchy', - boost: 1 - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d0dee031197214b59ff9ac7540527d2c.asciidoc b/docs/doc_examples/d0dee031197214b59ff9ac7540527d2c.asciidoc new file mode 100644 index 000000000..de8e1dc6e --- /dev/null +++ b/docs/doc_examples/d0dee031197214b59ff9ac7540527d2c.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movfn: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "MovingFunctions.unweightedAvg(values)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d0fad375f6e074e9067ed93d3faa07bd.asciidoc b/docs/doc_examples/d0fad375f6e074e9067ed93d3faa07bd.asciidoc new file mode 100644 index 000000000..309e1a7ba --- /dev/null +++ b/docs/doc_examples/d0fad375f6e074e9067ed93d3faa07bd.asciidoc @@ -0,0 +1,103 @@ +// This file is autogenerated, DO NOT EDIT +// 
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (491.2350 5237.4081)", + city: "Amsterdam", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (490.1618 5236.9219)", + city: "Amsterdam", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (491.4722 5237.1667)", + city: "Amsterdam", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (440.5200 5122.2900)", + city: "Antwerp", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (233.6389 4886.1111)", + city: "Paris", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (232.7000 4886.0000)", + city: "Paris", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + query: { + match: { + name: "musée", + }, + }, + aggs: { + viewport: { + cartesian_bounds: { + field: "location", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc b/docs/doc_examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc new file mode 100644 index 000000000..1ad80af1d --- /dev/null +++ b/docs/doc_examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + query: "foo | bar + baz*", + flags: "OR|AND|PREFIX", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d11ea753a5d86f7e630fd69a069948b1.asciidoc b/docs/doc_examples/d11ea753a5d86f7e630fd69a069948b1.asciidoc new file mode 100644 index 000000000..8c3fb1686 --- /dev/null +++ b/docs/doc_examples/d11ea753a5d86f7e630fd69a069948b1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1299b9ae1e621d2fdd0b8644c142ace.asciidoc b/docs/doc_examples/d1299b9ae1e621d2fdd0b8644c142ace.asciidoc new file mode 100644 index 000000000..cb85bad2c --- /dev/null +++ b/docs/doc_examples/d1299b9ae1e621d2fdd0b8644c142ace.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "log-messages", + filter_path: "aggregations", + aggs: { + daily: { + date_histogram: { + field: "time", + fixed_interval: "1d", + }, + aggs: { + categories: { + categorize_text: { + field: "message", + categorization_filters: ["\\w+\\_\\d{3}"], + }, + aggs: { + hit: { + top_hits: { + size: 1, + sort: ["time"], + _source: "message", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/d12df43ffcdcd937bae9b26fb475e239.asciidoc b/docs/doc_examples/d12df43ffcdcd937bae9b26fb475e239.asciidoc new file mode 100644 index 000000000..bf51e32ce --- /dev/null +++ b/docs/doc_examples/d12df43ffcdcd937bae9b26fb475e239.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "uax_url_email", + text: "Email me at john.smith@global-international.com", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d133b5d82238f7d4778c341cbe0bc969.asciidoc b/docs/doc_examples/d133b5d82238f7d4778c341cbe0bc969.asciidoc new file mode 100644 index 000000000..a42b122e1 --- /dev/null +++ b/docs/doc_examples/d133b5d82238f7d4778c341cbe0bc969.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mtermvectors({ + docs: [ + { + _index: "my-index-000001", + doc: { + message: "test test test", + }, + }, + { + _index: "my-index-000001", + doc: { + message: "Another test ...", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d13c7cdfc976e0c7b70737cd6a7becb8.asciidoc b/docs/doc_examples/d13c7cdfc976e0c7b70737cd6a7becb8.asciidoc new file mode 100644 index 000000000..85d920867 --- /dev/null +++ b/docs/doc_examples/d13c7cdfc976e0c7b70737cd6a7becb8.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "price.adjusted": { + type: "double", + script: { + source: "emit(doc['price'].value * params.adjustment)", + params: { + adjustment: 0.9, + }, + }, + }, + }, + aggs: { + by_date: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + avg_price: { + rate: { + field: "price.adjusted", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc b/docs/doc_examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc new file mode 100644 index 000000000..9eafbf38d --- /dev/null +++ b/docs/doc_examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.explainLifecycle({ + index: "my-index-000001", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d17269bb80fb63ec0bf37d219e003dcb.asciidoc b/docs/doc_examples/d17269bb80fb63ec0bf37d219e003dcb.asciidoc deleted file mode 100644 index b4973123c..000000000 --- a/docs/doc_examples/d17269bb80fb63ec0bf37d219e003dcb.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': [ - -70, - 40 - ], - order: 'asc', - unit: 'km', - mode: 'min', - distance_type: 'arc', - ignore_unmapped: true - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d1b3b7d2bb2ab90d15fd10318abd24db.asciidoc 
b/docs/doc_examples/d1b3b7d2bb2ab90d15fd10318abd24db.asciidoc deleted file mode 100644 index c08921115..000000000 --- a/docs/doc_examples/d1b3b7d2bb2ab90d15fd10318abd24db.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - post_date: { - type: 'date' - }, - user: { - type: 'keyword' - }, - name: { - type: 'keyword' - }, - age: { - type: 'integer' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc b/docs/doc_examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc new file mode 100644 index 000000000..8564b2dd5 --- /dev/null +++ b/docs/doc_examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.estimateModelMemory({ + analysis_config: { + bucket_span: "5m", + detectors: [ + { + function: "sum", + field_name: "bytes", + by_field_name: "status", + partition_field_name: "app", + }, + ], + influencers: ["source_ip", "dest_ip"], + }, + overall_cardinality: { + status: 10, + app: 50, + }, + max_bucket_cardinality: { + source_ip: 300, + dest_ip: 30, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1bcf2eb63a462bfdcf01a68e68d5b4a.asciidoc b/docs/doc_examples/d1bcf2eb63a462bfdcf01a68e68d5b4a.asciidoc deleted file mode 100644 index 279780d17..000000000 --- a/docs/doc_examples/d1bcf2eb63a462bfdcf01a68e68d5b4a.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'my_index', - pretty: true, - body: { - query: { - terms: { - color: { - index: 'my_index', - id: '2', - path: 'color' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d1ce66957f8bd84bf01c4bfaee3ba0c3.asciidoc b/docs/doc_examples/d1ce66957f8bd84bf01c4bfaee3ba0c3.asciidoc new file mode 100644 index 000000000..ea46327a9 --- /dev/null +++ b/docs/doc_examples/d1ce66957f8bd84bf01c4bfaee3ba0c3.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + filter_path: "hits.events._source.@timestamp,hits.events._source.process.pid", + query: '\n process where process.name == "regsvr32.exe"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc b/docs/doc_examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc new file mode 100644 index 000000000..93df61ff0 --- /dev/null +++ b/docs/doc_examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + multi_match: { + query: "How is the weather in Jamaica?", + fields: ["title", "description"], + }, + }, + }, + }, + { + standard: { + query: { + sparse_vector: { + field: 
"ml.inference.title_expanded.predicted_value", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + boost: 1, + }, + }, + }, + }, + { + standard: { + query: { + sparse_vector: { + field: "ml.inference.description_expanded.predicted_value", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + boost: 1, + }, + }, + }, + }, + ], + window_size: 10, + rank_constant: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc b/docs/doc_examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc new file mode 100644 index 000000000..e19f93f40 --- /dev/null +++ b/docs/doc_examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + name: "my-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc b/docs/doc_examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc new file mode 100644 index 000000000..8f098647f --- /dev/null +++ b/docs/doc_examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "connector_name-connector-api-key", + role_descriptors: { + "connector_name-connector-role": { + cluster: ["monitor", "manage_connector"], + indices: [ + { + names: [ + "index_name", + ".search-acl-filter-index_name", + ".elastic-connectors*", + ], + privileges: ["all"], + allow_restricted_indices: false, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1ecce3632ae338b5e329b0e5ff3bed7.asciidoc b/docs/doc_examples/d1ecce3632ae338b5e329b0e5ff3bed7.asciidoc new file mode 100644 index 000000000..1d5b2b25e --- /dev/null +++ b/docs/doc_examples/d1ecce3632ae338b5e329b0e5ff3bed7.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_join_field: { + type: "join", + relations: { + question: "answer", + }, + eager_global_ordinals: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc b/docs/doc_examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc new file mode 100644 index 000000000..623cf32f0 --- /dev/null +++ b/docs/doc_examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + q: "extra:test", + filter_path: "hits.total", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d2153f3100bf12c2de98f14eb86ab061.asciidoc b/docs/doc_examples/d2153f3100bf12c2de98f14eb86ab061.asciidoc deleted file mode 100644 index 8d4335ffa..000000000 --- a/docs/doc_examples/d2153f3100bf12c2de98f14eb86ab061.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- 
-const response = await client.search({ - index: 'twitter' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d222c6a6ec7a3beca6c97011b0874512.asciidoc b/docs/doc_examples/d222c6a6ec7a3beca6c97011b0874512.asciidoc deleted file mode 100644 index d5a16f507..000000000 --- a/docs/doc_examples/d222c6a6ec7a3beca6c97011b0874512.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.getSource({ - index: 'twitter', - id: '1', - _source_includes: '*.id', - _source_excludes: 'entities' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d23452f333b77bf5b463310e2a665560.asciidoc b/docs/doc_examples/d23452f333b77bf5b463310e2a665560.asciidoc new file mode 100644 index 000000000..711ada3a1 --- /dev/null +++ b/docs/doc_examples/d23452f333b77bf5b463310e2a665560.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "my_director", + refresh: "true", + cluster: ["manage"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["manage"], + }, + ], + run_as: ["jacknich", "rdeniro"], + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc b/docs/doc_examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc new file mode 100644 index 000000000..9a5f60d47 --- /dev/null +++ b/docs/doc_examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "russian_example", + settings: { + analysis: { + filter: { + russian_stop: { + type: "stop", + stopwords: "_russian_", + }, + russian_keywords: { + type: "keyword_marker", + keywords: ["пример"], + }, + russian_stemmer: { + type: "stemmer", + language: "russian", + }, + }, + analyzer: { + rebuilt_russian: { + tokenizer: "standard", + filter: [ + "lowercase", + "russian_stop", + "russian_keywords", + "russian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d268aec16bb1eb909b634e856175094c.asciidoc b/docs/doc_examples/d268aec16bb1eb909b634e856175094c.asciidoc new file mode 100644 index 000000000..1e4498566 --- /dev/null +++ b/docs/doc_examples/d268aec16bb1eb909b634e856175094c.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_stop_analyzer: { + type: "stop", + stopwords: ["the", "over"], + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_stop_analyzer", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d27591881da6f5767523b1beb233adc7.asciidoc b/docs/doc_examples/d27591881da6f5767523b1beb233adc7.asciidoc new file mode 100644 index 000000000..907c0a556 --- /dev/null +++ b/docs/doc_examples/d27591881da6f5767523b1beb233adc7.asciidoc @@ -0,0 +1,13 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_backup", + repository: { + type: "azure", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc b/docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc new file mode 100644 index 000000000..ff0c652e4 --- /dev/null +++ b/docs/doc_examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.state({ + metric: "metadata", + filter_path: "metadata.indices.*.system", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d2f52c106685bd8eab47e11d644d7a70.asciidoc b/docs/doc_examples/d2f52c106685bd8eab47e11d644d7a70.asciidoc new file mode 100644 index 000000000..8ebe00905 --- /dev/null +++ b/docs/doc_examples/d2f52c106685bd8eab47e11d644d7a70.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + date: { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + date: "2015-01-01", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + date: "2015-01-01T12:10:30Z", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index-000001", + id: 3, + document: { + date: 1420070400001, + }, +}); +console.log(response3); + +const response4 = await client.search({ + index: "my-index-000001", + sort: { + date: "asc", + }, +}); +console.log(response4); +---- diff --git a/docs/doc_examples/d2f6040c058a9555dfa62bb42d896a8f.asciidoc b/docs/doc_examples/d2f6040c058a9555dfa62bb42d896a8f.asciidoc new file mode 100644 index 000000000..c9196570f --- /dev/null +++ b/docs/doc_examples/d2f6040c058a9555dfa62bb42d896a8f.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_queries1", + query: { + percolate: { + field: "query", + document: { + my_field: "abcd", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d2f6fb271e97fde8685d7744e6718cc7.asciidoc b/docs/doc_examples/d2f6fb271e97fde8685d7744e6718cc7.asciidoc new file mode 100644 index 000000000..93e7ac887 --- /dev/null +++ b/docs/doc_examples/d2f6fb271e97fde8685d7744e6718cc7.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "text_payloads", + id: 1, + document: { + text: "the|0 brown|3 fox|4 is|0 quick|10", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3016e4e8025362ad9a05ee86bb2061f.asciidoc b/docs/doc_examples/d3016e4e8025362ad9a05ee86bb2061f.asciidoc deleted file mode 100644 index 17fdfe181..000000000 --- a/docs/doc_examples/d3016e4e8025362ad9a05ee86bb2061f.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ 
-// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'twitter', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d305110a8cabfbebd1e38d85559d1023.asciidoc b/docs/doc_examples/d305110a8cabfbebd1e38d85559d1023.asciidoc new file mode 100644 index 000000000..42e37aee7 --- /dev/null +++ b/docs/doc_examples/d305110a8cabfbebd1e38d85559d1023.asciidoc @@ -0,0 +1,62 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cjk_example", + settings: { + analysis: { + filter: { + english_stop: { + type: "stop", + stopwords: [ + "a", + "and", + "are", + "as", + "at", + "be", + "but", + "by", + "for", + "if", + "in", + "into", + "is", + "it", + "no", + "not", + "of", + "on", + "or", + "s", + "such", + "t", + "that", + "the", + "their", + "then", + "there", + "these", + "they", + "this", + "to", + "was", + "will", + "with", + "www", + ], + }, + }, + analyzer: { + rebuilt_cjk: { + tokenizer: "standard", + filter: ["cjk_width", "lowercase", "cjk_bigram", "english_stop"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3088d5fa59b3ab110f64fb4f9b0065c.asciidoc b/docs/doc_examples/d3088d5fa59b3ab110f64fb4f9b0065c.asciidoc deleted file mode 100644 index aed022fbe..000000000 --- a/docs/doc_examples/d3088d5fa59b3ab110f64fb4f9b0065c.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'my_index', - id: '1', - body: { - color: [ - 'blue', - 'green' - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d31062ff8c015387889fed4ad86fd914.asciidoc b/docs/doc_examples/d31062ff8c015387889fed4ad86fd914.asciidoc deleted file mode 100644 index a4c4076b7..000000000 --- a/docs/doc_examples/d31062ff8c015387889fed4ad86fd914.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - wildcard: { - user: { - value: 'ki*y', - boost: 1, - rewrite: 'constant_score' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc b/docs/doc_examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc new file mode 100644 index 000000000..0fc058e58 --- /dev/null +++ b/docs/doc_examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.pauseFollow({ + index: "follower_index", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc b/docs/doc_examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc new file mode 100644 index 000000000..8a5eced01 --- /dev/null +++ b/docs/doc_examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.reindex({ + wait_for_completion: "false", + source: { + index: "test-data", + size: 50, + }, + dest: { + index: "azure-openai-embeddings", + pipeline: "azure_openai_embeddings_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d34946f59b6f938b141a37cb0b729308.asciidoc b/docs/doc_examples/d34946f59b6f938b141a37cb0b729308.asciidoc new file mode 100644 index 000000000..522b79829 --- /dev/null +++ b/docs/doc_examples/d34946f59b6f938b141a37cb0b729308.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.enrich.putPolicy({ + name: "postal_policy", + geo_match: { + indices: "postal_codes", + match_field: "location", + enrich_fields: ["location", "postal_code"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d35a4d78a8b70c9e4d636efb0a92be9d.asciidoc b/docs/doc_examples/d35a4d78a8b70c9e4d636efb0a92be9d.asciidoc new file mode 100644 index 000000000..80591c90a --- /dev/null +++ b/docs/doc_examples/d35a4d78a8b70c9e4d636efb0a92be9d.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + aggs: { + genres_and_products: { + multi_terms: { + terms: [ + { + field: "genre", + }, + { + field: "product", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc new file mode 100644 index 000000000..644390ba0 --- /dev/null +++ b/docs/doc_examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcPrepareAuthentication({ + iss: "http://127.0.0.1:8080/", + login_hint: "this_is_an_opaque_string", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc b/docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc new file mode 100644 index 000000000..dad59f975 --- /dev/null +++ b/docs/doc_examples/d3672a87a857ddb87519788236e57497.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "jinaai-index", + retriever: { + text_similarity_reranker: { + retriever: { + standard: { + query: { + semantic: { + field: "content", + query: "who inspired taking care of the sea?", + }, + }, + }, + }, + field: "content", + rank_window_size: 100, + inference_id: "jinaai-rerank", + inference_text: "who inspired taking care of the sea?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d37b065a94b3ff65a2a8a204fc3b097c.asciidoc b/docs/doc_examples/d37b065a94b3ff65a2a8a204fc3b097c.asciidoc new file mode 100644 index 000000000..4e26625de --- /dev/null +++ b/docs/doc_examples/d37b065a94b3ff65a2a8a204fc3b097c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.getStatus({ + id: 
"FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d37b0bda2bd24ab310e6b26708c7c6fb.asciidoc b/docs/doc_examples/d37b0bda2bd24ab310e6b26708c7c6fb.asciidoc new file mode 100644 index 000000000..6967389d1 --- /dev/null +++ b/docs/doc_examples/d37b0bda2bd24ab310e6b26708c7c6fb.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_date_histo: { + date_histogram: { + field: "date", + calendar_interval: "1M", + }, + aggs: { + the_sum: { + sum: { + field: "price", + }, + }, + the_movavg: { + moving_fn: { + buckets_path: "the_sum", + window: 10, + script: "return values.length > 0 ? values[0] : Double.NaN", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc b/docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc new file mode 100644 index 000000000..46940cf06 --- /dev/null +++ b/docs/doc_examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + linear: { + retrievers: [ + { + retriever: { + standard: { + query: { + query_string: { + query: "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + weight: 2, + normalizer: "minmax", + }, + { + retriever: { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + weight: 1.5, + normalizer: "minmax", + }, + ], + rank_window_size: 10, + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc b/docs/doc_examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc new file mode 100644 index 000000000..3a490c68d --- /dev/null +++ b/docs/doc_examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + "http.responses": { + type: "long", + script: + '\n String response=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{response} %{size}\').extract(doc["message"].value)?.response;\n if (response != null) emit(Integer.parseInt(response));\n ', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3d117fec34301520ccdb26332e7c98a.asciidoc b/docs/doc_examples/d3d117fec34301520ccdb26332e7c98a.asciidoc new file mode 100644 index 000000000..96279479b --- /dev/null +++ b/docs/doc_examples/d3d117fec34301520ccdb26332e7c98a.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + registered_domain: { + field: "fqdn", + target_field: "url", + }, + }, + ], + }, + docs: [ + { + _source: { + fqdn: "www.example.ac.uk", + }, + }, + ], +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc b/docs/doc_examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc new file mode 100644 index 000000000..91c379871 --- /dev/null +++ b/docs/doc_examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + query_string: { + fields: ["content", "name.*^5"], + query: "this AND that OR thus", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc b/docs/doc_examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc new file mode 100644 index 000000000..716555e1c --- /dev/null +++ b/docs/doc_examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "clicks_admin", + run_as: ["clicks_watcher_1"], + cluster: ["monitor"], + indices: [ + { + names: ["events-*"], + privileges: ["read"], + field_security: { + grant: ["category", "@timestamp", "message"], + }, + query: '{"match": {"category": "click"}}', + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d3e9e1169c3514fd46e253cd8b5ae3cb.asciidoc b/docs/doc_examples/d3e9e1169c3514fd46e253cd8b5ae3cb.asciidoc new file mode 100644 index 000000000..7d704155a --- /dev/null +++ b/docs/doc_examples/d3e9e1169c3514fd46e253cd8b5ae3cb.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["my_script_filter"], + }, + }, + filter: { + my_script_filter: { + type: "predicate_token_filter", + script: { + source: + '\n token.type.contains("ALPHANUM")\n ', + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4158d486e7fee2702a14068b69e3b33.asciidoc b/docs/doc_examples/d4158d486e7fee2702a14068b69e3b33.asciidoc new file mode 100644 index 000000000..ccee5c776 --- /dev/null +++ b/docs/doc_examples/d4158d486e7fee2702a14068b69e3b33.asciidoc @@ -0,0 +1,154 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "datastream_template", + index_patterns: ["datastream*"], + data_stream: {}, + template: { + lifecycle: { + downsampling: [ + { + after: "1m", + fixed_interval: "1h", + }, + ], + }, + settings: { + index: { + mode: "time_series", + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + kubernetes: { + properties: { + container: { + properties: { + cpu: { + properties: { + usage: { + properties: { + core: { + properties: { + ns: { + type: "long", + }, + }, + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + nanocores: { + type: "long", + time_series_metric: "gauge", + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + }, + }, + memory: { + properties: { + available: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + majorpagefaults: { 
+ type: "long", + }, + pagefaults: { + type: "long", + time_series_metric: "gauge", + }, + rss: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + usage: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + limit: { + properties: { + pct: { + type: "float", + }, + }, + }, + node: { + properties: { + pct: { + type: "float", + }, + }, + }, + }, + }, + workingset: { + properties: { + bytes: { + type: "long", + time_series_metric: "gauge", + }, + }, + }, + }, + }, + name: { + type: "keyword", + }, + start_time: { + type: "date", + }, + }, + }, + host: { + type: "keyword", + time_series_dimension: true, + }, + namespace: { + type: "keyword", + time_series_dimension: true, + }, + node: { + type: "keyword", + time_series_dimension: true, + }, + pod: { + type: "keyword", + time_series_dimension: true, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4323be84152fa91abd76e966d4751dc.asciidoc b/docs/doc_examples/d4323be84152fa91abd76e966d4751dc.asciidoc new file mode 100644 index 000000000..de913f758 --- /dev/null +++ b/docs/doc_examples/d4323be84152fa91abd76e966d4751dc.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryApiKeys({ + query: { + term: { + name: { + value: "application-key-1", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d443db2755fde3b49ca3a9d296c4a96f.asciidoc b/docs/doc_examples/d443db2755fde3b49ca3a9d296c4a96f.asciidoc new file mode 100644 index 000000000..138641329 --- /dev/null +++ b/docs/doc_examples/d443db2755fde3b49ca3a9d296c4a96f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "delimited_payload", + settings: { + analysis: { + analyzer: { + whitespace_delimited_payload: { + tokenizer: "whitespace", + filter: ["delimited_payload"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d44ecc69090c0b2bc08a6cbc2e3467c5.asciidoc b/docs/doc_examples/d44ecc69090c0b2bc08a6cbc2e3467c5.asciidoc new file mode 100644 index 000000000..c90396ab3 --- /dev/null +++ b/docs/doc_examples/d44ecc69090c0b2bc08a6cbc2e3467c5.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "news", + query: { + simple_query_string: { + query: "+elasticsearch +pozmantier", + }, + }, + _source: ["title", "source"], + highlight: { + fields: { + content: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc b/docs/doc_examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc new file mode 100644 index 000000000..63458b1c0 --- /dev/null +++ b/docs/doc_examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlCompleteLogout({ + realm: "saml1", + ids: ["_1c368075e0b3..."], + content: "PHNhbWxwOkxvZ291dFJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46...", +}); +console.log(response); +---- 
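Editor's sketch, not part of the autogenerated set: the `delimited_payload` example above creates the analyzer but stops short of running it. Assuming the `delimited_payload` index from that example exists, the analyzer could be exercised with the analyze API, mirroring the create-then-analyze pairing used elsewhere in these examples:

[source, js]
----
// Hypothetical follow-up (not generated): run the custom analyzer so each
// "token|payload" pair such as "brown|3" is split by the delimited_payload
// filter into a term and its payload.
const response = await client.indices.analyze({
  index: "delimited_payload",
  analyzer: "whitespace_delimited_payload",
  text: "the|0 brown|3 fox|4 is|0 quick|10",
});
console.log(response);
----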
diff --git a/docs/doc_examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc b/docs/doc_examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc new file mode 100644 index 000000000..a1e4b7603 --- /dev/null +++ b/docs/doc_examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getToken({ + grant_type: "refresh_token", + refresh_token: "vLBPvmAB6KvwvJZr27cS", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d49318764244113ad2ac4cc0f06d77ec.asciidoc b/docs/doc_examples/d49318764244113ad2ac4cc0f06d77ec.asciidoc new file mode 100644 index 000000000..3c6a63893 --- /dev/null +++ b/docs/doc_examples/d49318764244113ad2ac4cc0f06d77ec.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "image-index", + mappings: { + properties: { + "image-vector": { + type: "dense_vector", + dims: 3, + similarity: "l2_norm", + index_options: { + type: "hnsw", + m: 32, + ef_construction: 100, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4a41fb74b41b41a0ee114a2311f2815.asciidoc b/docs/doc_examples/d4a41fb74b41b41a0ee114a2311f2815.asciidoc new file mode 100644 index 000000000..beea387be --- /dev/null +++ b/docs/doc_examples/d4a41fb74b41b41a0ee114a2311f2815.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_age: "7d", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc b/docs/doc_examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc new file mode 100644 index 000000000..eaa534005 --- /dev/null +++ b/docs/doc_examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.evaluateDataFrame({ + index: "my_analytics_dest_index", + evaluation: { + outlier_detection: { + actual_field: "is_outlier", + predicted_probability_field: "ml.outlier_score", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4b4cefba4318caeba7480187faf2b13.asciidoc b/docs/doc_examples/d4b4cefba4318caeba7480187faf2b13.asciidoc deleted file mode 100644 index c005254ad..000000000 --- a/docs/doc_examples/d4b4cefba4318caeba7480187faf2b13.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'my_index', - id: '1', - body: { - full_text: 'Quick Brown Foxes!' 
- } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d4b50ae96e541c0031264a10f6afccbf.asciidoc b/docs/doc_examples/d4b50ae96e541c0031264a10f6afccbf.asciidoc new file mode 100644 index 000000000..2669eba06 --- /dev/null +++ b/docs/doc_examples/d4b50ae96e541c0031264a10f6afccbf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.migrateToDataStream({ + name: "my-time-series-data", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4cdcf01014c75693b080c778071c1b5.asciidoc b/docs/doc_examples/d4cdcf01014c75693b080c778071c1b5.asciidoc new file mode 100644 index 000000000..a7d60de27 --- /dev/null +++ b/docs/doc_examples/d4cdcf01014c75693b080c778071c1b5.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "exams", + size: 0, + aggs: { + grades_stats: { + stats: { + field: "grade", + missing: 0, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc b/docs/doc_examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc new file mode 100644 index 000000000..551748bea --- /dev/null +++ b/docs/doc_examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + user: { + id: "kimchy", + }, + "@timestamp": "2099-11-15T14:12:12", + message: "trying out Elasticsearch", + }, + { + index: { + _id: 2, + }, + }, + { + user: { + id: "kimchi", + }, + "@timestamp": "2099-11-15T14:12:13", + message: "My user ID is similar to kimchy!", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc b/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc new file mode 100644 index 000000000..aa1a436a0 --- /dev/null +++ b/docs/doc_examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putIpLocationDatabase({ + id: "my-database-2", + configuration: { + name: "standard_location", + ipinfo: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc b/docs/doc_examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc new file mode 100644 index 000000000..9f176704b --- /dev/null +++ b/docs/doc_examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.deleteAutoFollowPattern({ + name: "my_auto_follow_pattern", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d4fb482a51d67a1af48e429af6019a46.asciidoc b/docs/doc_examples/d4fb482a51d67a1af48e429af6019a46.asciidoc new file mode 100644 index 000000000..2ab3635f9 --- /dev/null +++ b/docs/doc_examples/d4fb482a51d67a1af48e429af6019a46.asciidoc @@ -0,0 +1,27 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + "sort.field": ["username", "date"], + "sort.order": ["asc", "desc"], + }, + }, + mappings: { + properties: { + username: { + type: "keyword", + doc_values: true, + }, + date: { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d50a3c64890f88af32c6d4ef4899d82a.asciidoc b/docs/doc_examples/d50a3c64890f88af32c6d4ef4899d82a.asciidoc deleted file mode 100644 index 754985636..000000000 --- a/docs/doc_examples/d50a3c64890f88af32c6d4ef4899d82a.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - _geo_distance: { - 'pin.location': '40,-70', - order: 'asc', - unit: 'km' - } - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc b/docs/doc_examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc new file mode 100644 index 000000000..4494a29ac --- /dev/null +++ b/docs/doc_examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putTrainedModelAlias({ + model_id: "flight-delay-prediction-1580004349800", + model_alias: "flight_delay_model", + reassign: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5132d34ae922fa8e898889b627a1405.asciidoc b/docs/doc_examples/d5132d34ae922fa8e898889b627a1405.asciidoc new file mode 100644 index 000000000..663f0c9b5 --- /dev/null +++ b/docs/doc_examples/d5132d34ae922fa8e898889b627a1405.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "child_example", + size: 0, + aggs: { + "top-tags": { + terms: { + field: "tags.keyword", + size: 10, + }, + aggs: { + "to-answers": { + children: { + type: "answer", + }, + aggs: { + "top-names": { + terms: { + field: "owner.display_name.keyword", + size: 10, + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc b/docs/doc_examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc new file mode 100644 index 000000000..865d407f9 --- /dev/null +++ b/docs/doc_examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information", + processors: [ + { + attachment: { + field: "data", + remove_binary: true, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "attachment", + document: { + data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: 
"my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d524db57be9f16abac5396895b9a2a59.asciidoc b/docs/doc_examples/d524db57be9f16abac5396895b9a2a59.asciidoc new file mode 100644 index 000000000..2c534e759 --- /dev/null +++ b/docs/doc_examples/d524db57be9f16abac5396895b9a2a59.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.resolveIndex({ + name: "my-index-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d547d55efbf75374f6de1f224323bc73.asciidoc b/docs/doc_examples/d547d55efbf75374f6de1f224323bc73.asciidoc new file mode 100644 index 000000000..06b300ec2 --- /dev/null +++ b/docs/doc_examples/d547d55efbf75374f6de1f224323bc73.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "geocells", + mappings: { + properties: { + geocell: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.ingest.putPipeline({ + id: "geotile2shape", + description: "translate rectangular z/x/y geotile to bounding box", + processors: [ + { + geo_grid: { + field: "geocell", + tile_type: "geotile", + }, + }, + ], +}); +console.log(response1); + +const response2 = await client.ingest.putPipeline({ + id: "geohex2shape", + description: "translate H3 cell to polygon", + processors: [ + { + geo_grid: { + field: "geocell", + tile_type: "geohex", + target_format: "wkt", + }, + }, + ], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc b/docs/doc_examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc new file mode 100644 index 000000000..1a2273a4e --- /dev/null +++ b/docs/doc_examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + counter: { + type: "integer", + store: false, + }, + tags: { + type: "keyword", + store: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d56a9d89282df56adbbc34b91390ac17.asciidoc b/docs/doc_examples/d56a9d89282df56adbbc34b91390ac17.asciidoc new file mode 100644 index 000000000..e987bbba2 --- /dev/null +++ b/docs/doc_examples/d56a9d89282df56adbbc34b91390ac17.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.getAutoFollowPattern({ + name: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d59e9cc75814575aa5e275dbe262918c.asciidoc b/docs/doc_examples/d59e9cc75814575aa5e275dbe262918c.asciidoc new file mode 100644 index 000000000..1e76b4c4b --- /dev/null +++ b/docs/doc_examples/d59e9cc75814575aa5e275dbe262918c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + geo_grid: { + location: { + geohash: "u0", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/d5abaf1fd26f0abf410dd8827d077bbf.asciidoc b/docs/doc_examples/d5abaf1fd26f0abf410dd8827d077bbf.asciidoc new file mode 100644 index 000000000..20d959497 --- /dev/null +++ b/docs/doc_examples/d5abaf1fd26f0abf410dd8827d077bbf.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + match_all: {}, + }, + sort: ["my_id"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5bf9bc08f622ece98632a14a3982e27.asciidoc b/docs/doc_examples/d5bf9bc08f622ece98632a14a3982e27.asciidoc new file mode 100644 index 000000000..a6bdb28e8 --- /dev/null +++ b/docs/doc_examples/d5bf9bc08f622ece98632a14a3982e27.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_all: {}, + }, + script_fields: { + test1: { + script: "params['_source']['message']", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc b/docs/doc_examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc new file mode 100644 index 000000000..1d3f9bd7e --- /dev/null +++ b/docs/doc_examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + _source: ["user.id", "_doc"], + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc b/docs/doc_examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc new file mode 100644 index 000000000..37aa7ee9b --- /dev/null +++ b/docs/doc_examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + scroll: "1m", + sort: ["_doc"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc b/docs/doc_examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc new file mode 100644 index 000000000..631a13ebf --- /dev/null +++ b/docs/doc_examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.get({ + id: "FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc b/docs/doc_examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc new file mode 100644 index 000000000..8f4d19539 --- /dev/null +++ b/docs/doc_examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.recovery({ + v: "true", + h: "i,s,t,ty,st,rep,snap,f,fp,b,bp", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d64679f8a53928fe9958dbe5ee5d9d13.asciidoc 
b/docs/doc_examples/d64679f8a53928fe9958dbe5ee5d9d13.asciidoc new file mode 100644 index 000000000..f00794963 --- /dev/null +++ b/docs/doc_examples/d64679f8a53928fe9958dbe5ee5d9d13.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + parent_id: { + type: "answer", + id: "1", + }, + }, + aggs: { + parents: { + terms: { + field: "my_join_field#question", + size: 10, + }, + }, + }, + runtime_mappings: { + parent: { + type: "long", + script: + "\n emit(Integer.parseInt(doc['my_join_field#question'].value)) \n ", + }, + }, + fields: [ + { + field: "parent", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d64d509440afbed7cefd04b6898962eb.asciidoc b/docs/doc_examples/d64d509440afbed7cefd04b6898962eb.asciidoc new file mode 100644 index 000000000..8643f368a --- /dev/null +++ b/docs/doc_examples/d64d509440afbed7cefd04b6898962eb.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_geoshapes", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "200km", + "pin.location": { + lat: 40, + lon: -70, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d66e2b4d1931bf88c72e74670156e43f.asciidoc b/docs/doc_examples/d66e2b4d1931bf88c72e74670156e43f.asciidoc new file mode 100644 index 000000000..f3794bd47 --- /dev/null +++ b/docs/doc_examples/d66e2b4d1931bf88c72e74670156e43f.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + track_total_hits: 100, + query: { + match: { + "user.id": "elkbee", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d681508a745b2bc777d47ba606d24224.asciidoc b/docs/doc_examples/d681508a745b2bc777d47ba606d24224.asciidoc new file mode 100644 index 000000000..9a01deced --- /dev/null +++ b/docs/doc_examples/d681508a745b2bc777d47ba606d24224.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.fielddata({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d681b643da0d7f0a384f627b6d56111b.asciidoc b/docs/doc_examples/d681b643da0d7f0a384f627b6d56111b.asciidoc new file mode 100644 index 000000000..12af9491f --- /dev/null +++ b/docs/doc_examples/d681b643da0d7f0a384f627b6d56111b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + message: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d690a6af462c70a783625a323e11c72c.asciidoc b/docs/doc_examples/d690a6af462c70a783625a323e11c72c.asciidoc new file mode 100644 index 000000000..b6c413d2d --- /dev/null +++ b/docs/doc_examples/d690a6af462c70a783625a323e11c72c.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT 
EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test-index", + settings: { + number_of_shards: 1, + number_of_replicas: 1, + "index.lifecycle.name": "my_policy", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d69bd36335774c8ae1286cee21310241.asciidoc b/docs/doc_examples/d69bd36335774c8ae1286cee21310241.asciidoc new file mode 100644 index 000000000..d74ae75be --- /dev/null +++ b/docs/doc_examples/d69bd36335774c8ae1286cee21310241.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-search", + remote_indices: [ + { + clusters: ["my_remote_cluster"], + names: ["target-index"], + privileges: ["read", "read_cross_cluster", "view_index_metadata"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d69cf7c82602431d9e339583e7dfb988.asciidoc b/docs/doc_examples/d69cf7c82602431d9e339583e7dfb988.asciidoc new file mode 100644 index 000000000..935a516fc --- /dev/null +++ b/docs/doc_examples/d69cf7c82602431d9e339583e7dfb988.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + std_english: { + type: "standard", + stopwords: "_english_", + }, + }, + }, + }, + mappings: { + properties: { + my_text: { + type: "text", + analyzer: "standard", + fields: { + english: { + type: "text", + analyzer: "std_english", + }, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + field: "my_text", + text: "The old brown cow", +}); +console.log(response1); + +const response2 = await client.indices.analyze({ + index: "my-index-000001", + field: "my_text.english", + text: "The old brown cow", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc b/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc new file mode 100644 index 000000000..17ba11227 --- /dev/null +++ b/docs/doc_examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.list({ + from: 0, + size: 2, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc b/docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc new file mode 100644 index 000000000..745cb7efe --- /dev/null +++ b/docs/doc_examples/d6a4548b29e939fb197189c20c7c016f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "chat_completion", + inference_id: "chat-completion-endpoint", + inference_config: { + service: "elastic", + service_settings: { + model_id: "model-1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d70f55cd29cdb2dcd775ffa9e23ff393.asciidoc b/docs/doc_examples/d70f55cd29cdb2dcd775ffa9e23ff393.asciidoc new file mode 100644 index 
000000000..812d3b354 --- /dev/null +++ b/docs/doc_examples/d70f55cd29cdb2dcd775ffa9e23ff393.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "price.adjusted": { + type: "double", + script: + "\n double price = doc['price'].value;\n if (doc['promoted'].value) {\n price *= 0.8;\n }\n emit(price);\n ", + }, + }, + aggs: { + max_price: { + max: { + field: "price.adjusted", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc b/docs/doc_examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc new file mode 100644 index 000000000..e06b1bd91 --- /dev/null +++ b/docs/doc_examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.features.resetFeatures(); +console.log(response); +---- diff --git a/docs/doc_examples/d718b63cf1b6591a1d59a0cf4fd995eb.asciidoc b/docs/doc_examples/d718b63cf1b6591a1d59a0cf4fd995eb.asciidoc deleted file mode 100644 index 318bce61b..000000000 --- a/docs/doc_examples/d718b63cf1b6591a1d59a0cf4fd995eb.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.index({ - index: 'twitter', - id: '1', - op_type: 'create', - body: { - user: 'kimchy', - post_date: '2009-11-15T14:12:12', - message: 'trying out Elasticsearch' - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d7348119df9f89a556a7b767d5298c7e.asciidoc b/docs/doc_examples/d7348119df9f89a556a7b767d5298c7e.asciidoc new file mode 100644 index 000000000..01a333e19 --- /dev/null +++ b/docs/doc_examples/d7348119df9f89a556a7b767d5298c7e.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "tour", + filter_path: "aggregations", + aggregations: { + path: { + terms: { + field: "city", + }, + aggregations: { + museum_tour: { + geo_line: { + point: { + field: "location", + }, + sort: { + field: "@timestamp", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7717318d93d0a1f3ad049f9c6604417.asciidoc b/docs/doc_examples/d7717318d93d0a1f3ad049f9c6604417.asciidoc new file mode 100644 index 000000000..8cf16de62 --- /dev/null +++ b/docs/doc_examples/d7717318d93d0a1f3ad049f9c6604417.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "standard", + max_token_length: 5, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", +}); +console.log(response1); +---- diff --git 
a/docs/doc_examples/d775836a0d7abecc6637aa988f204c30.asciidoc b/docs/doc_examples/d775836a0d7abecc6637aa988f204c30.asciidoc new file mode 100644 index 000000000..5e976e1de --- /dev/null +++ b/docs/doc_examples/d775836a0d7abecc6637aa988f204c30.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + fullname: "John Doe", + text: "test test test ", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + refresh: "wait_for", + document: { + fullname: "Jane Doe", + text: "Another test ...", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d7898526d239d2aea83727fb982f8f77.asciidoc b/docs/doc_examples/d7898526d239d2aea83727fb982f8f77.asciidoc new file mode 100644 index 000000000..de62a08de --- /dev/null +++ b/docs/doc_examples/d7898526d239d2aea83727fb982f8f77.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh(); +console.log(response); +---- diff --git a/docs/doc_examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc b/docs/doc_examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc new file mode 100644 index 000000000..649a0757e --- /dev/null +++ b/docs/doc_examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + my_field: { + type: "text", + fielddata: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7a55a7c491e97079e429483085f1d58.asciidoc b/docs/doc_examples/d7a55a7c491e97079e429483085f1d58.asciidoc new file mode 100644 index 000000000..7eef74763 --- /dev/null +++ b/docs/doc_examples/d7a55a7c491e97079e429483085f1d58.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "dsl-data-stream-template", + index_patterns: ["dsl-data-stream*"], + data_stream: {}, + priority: 500, + template: { + settings: { + "index.lifecycle.name": "pre-dsl-ilm-policy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc b/docs/doc_examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc new file mode 100644 index 000000000..8bfce16e0 --- /dev/null +++ b/docs/doc_examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + term: { + text: "shoes", + }, + }, + }, + }, + { + standard: { + query: { + semantic: { + field: "semantic_field", + query: "shoes", + }, + }, + }, + }, + ], + rank_window_size: 50, + rank_constant: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc 
b/docs/doc_examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc new file mode 100644 index 000000000..afe5e6046 --- /dev/null +++ b/docs/doc_examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.startDatafeed({ + datafeed_id: "datafeed-low_request_rate", + start: "2019-04-07T18:22:16Z", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc b/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc new file mode 100644 index 000000000..3e0ce9910 --- /dev/null +++ b/docs/doc_examples/d7b61bfb6adb22986a43388b823894cc.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "cohere_embeddings", + inference_config: { + service: "cohere", + service_settings: { + api_key: "<api_key>", + model_id: "embed-english-v3.0", + embedding_type: "byte", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7d92816cac64b7c70d72b0000eeeeea.asciidoc b/docs/doc_examples/d7d92816cac64b7c70d72b0000eeeeea.asciidoc new file mode 100644 index 000000000..e854a2e49 --- /dev/null +++ b/docs/doc_examples/d7d92816cac64b7c70d72b0000eeeeea.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "test_role3", + indices: [ + { + names: ["*"], + privileges: ["read"], + field_security: { + grant: ["customer.handle"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7f42d1b906dc406be1819d17c625d5f.asciidoc b/docs/doc_examples/d7f42d1b906dc406be1819d17c625d5f.asciidoc new file mode 100644 index 000000000..6dd63ecb0 --- /dev/null +++ b/docs/doc_examples/d7f42d1b906dc406be1819d17c625d5f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + aggs: { + t_shirts: { + filter: { + term: { + type: "t-shirt", + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d7fe687201ac87b307cd06ed015dd317.asciidoc b/docs/doc_examples/d7fe687201ac87b307cd06ed015dd317.asciidoc new file mode 100644 index 000000000..c0b8ab7d9 --- /dev/null +++ b/docs/doc_examples/d7fe687201ac87b307cd06ed015dd317.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + user_id: { + type: "keyword", + ignore_above: 100, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc b/docs/doc_examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc new file mode 100644 index 000000000..76fcaf558 --- /dev/null +++ b/docs/doc_examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +//
Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-data-stream", + query: { + range: { + "@timestamp": { + gte: "now-7d/d", + lte: "now/d", + }, + }, + }, + }, + dest: { + index: "new-data-stream", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc b/docs/doc_examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc new file mode 100644 index 000000000..efb173b6b --- /dev/null +++ b/docs/doc_examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.putScript({ + id: "my-stored-script", + script: { + lang: "painless", + source: "Math.log(_score * 2) + params['my_modifier']", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8310e5606c61e7a6e64a90838b1a830.asciidoc b/docs/doc_examples/d8310e5606c61e7a6e64a90838b1a830.asciidoc new file mode 100644 index 000000000..2ebf0acd5 --- /dev/null +++ b/docs/doc_examples/d8310e5606c61e7a6e64a90838b1a830.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "parent_example", + id: 2, + routing: 1, + document: { + join: { + name: "answer", + parent: "1", + }, + owner: { + location: "Norfolk, United Kingdom", + display_name: "Sam", + id: 48, + }, + body: "Unfortunately you're pretty much limited to FTP...", + creation_date: "2009-05-04T13:45:37.030", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "parent_example", + id: 3, + routing: 1, + refresh: "true", + document: { + join: { + name: "answer", + parent: "1", + }, + owner: { + location: "Norfolk, United Kingdom", + display_name: "Troll", + id: 49, + }, + body: "Use Linux...", + creation_date: "2009-05-05T13:45:37.030", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc b/docs/doc_examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc new file mode 100644 index 000000000..ea74db82f --- /dev/null +++ b/docs/doc_examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + percolate: { + field: "query", + document: { + message: "The quick brown fox jumps over the lazy dog", + }, + }, + }, + highlight: { + fields: { + message: {}, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d84a861ce563508aeaaf30a9dd84b5cf.asciidoc b/docs/doc_examples/d84a861ce563508aeaaf30a9dd84b5cf.asciidoc new file mode 100644 index 000000000..6deb4fde6 --- /dev/null +++ b/docs/doc_examples/d84a861ce563508aeaaf30a9dd84b5cf.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + hot: { + actions: { + rollover: { + max_age: "7d", + max_size: "100gb", + min_docs: 1000, + }, + }, + }, + }, + }, +}); +console.log(response); +---- 
diff --git a/docs/doc_examples/d851282dba548251d10db5954a339307.asciidoc b/docs/doc_examples/d851282dba548251d10db5954a339307.asciidoc new file mode 100644 index 000000000..0dcc1cb4e --- /dev/null +++ b/docs/doc_examples/d851282dba548251d10db5954a339307.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "twitter", + query: { + match: { + title: "elasticsearch", + }, + }, + search_after: [1463538857, "654323"], + sort: [ + { + date: "asc", + }, + { + tie_breaker_id: "asc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d870d5bd1f97fc75872a298fcddec513.asciidoc b/docs/doc_examples/d870d5bd1f97fc75872a298fcddec513.asciidoc new file mode 100644 index 000000000..0d7463275 --- /dev/null +++ b/docs/doc_examples/d870d5bd1f97fc75872a298fcddec513.asciidoc @@ -0,0 +1,155 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.textStructure.findStructure({ + text_files: [ + { + name: "Leviathan Wakes", + author: "James S.A. Corey", + release_date: "2011-06-02", + page_count: 561, + }, + { + name: "Hyperion", + author: "Dan Simmons", + release_date: "1989-05-26", + page_count: 482, + }, + { + name: "Dune", + author: "Frank Herbert", + release_date: "1965-06-01", + page_count: 604, + }, + { + name: "Dune Messiah", + author: "Frank Herbert", + release_date: "1969-10-15", + page_count: 331, + }, + { + name: "Children of Dune", + author: "Frank Herbert", + release_date: "1976-04-21", + page_count: 408, + }, + { + name: "God Emperor of Dune", + author: "Frank Herbert", + release_date: "1981-05-28", + page_count: 454, + }, + { + name: "Consider Phlebas", + author: "Iain M. Banks", + release_date: "1987-04-23", + page_count: 471, + }, + { + name: "Pandora's Star", + author: "Peter F. 
Hamilton", + release_date: "2004-03-02", + page_count: 768, + }, + { + name: "Revelation Space", + author: "Alastair Reynolds", + release_date: "2000-03-15", + page_count: 585, + }, + { + name: "A Fire Upon the Deep", + author: "Vernor Vinge", + release_date: "1992-06-01", + page_count: 613, + }, + { + name: "Ender's Game", + author: "Orson Scott Card", + release_date: "1985-06-01", + page_count: 324, + }, + { + name: "1984", + author: "George Orwell", + release_date: "1985-06-01", + page_count: 328, + }, + { + name: "Fahrenheit 451", + author: "Ray Bradbury", + release_date: "1953-10-15", + page_count: 227, + }, + { + name: "Brave New World", + author: "Aldous Huxley", + release_date: "1932-06-01", + page_count: 268, + }, + { + name: "Foundation", + author: "Isaac Asimov", + release_date: "1951-06-01", + page_count: 224, + }, + { + name: "The Giver", + author: "Lois Lowry", + release_date: "1993-04-26", + page_count: 208, + }, + { + name: "Slaughterhouse-Five", + author: "Kurt Vonnegut", + release_date: "1969-06-01", + page_count: 275, + }, + { + name: "The Hitchhiker's Guide to the Galaxy", + author: "Douglas Adams", + release_date: "1979-10-12", + page_count: 180, + }, + { + name: "Snow Crash", + author: "Neal Stephenson", + release_date: "1992-06-01", + page_count: 470, + }, + { + name: "Neuromancer", + author: "William Gibson", + release_date: "1984-07-01", + page_count: 271, + }, + { + name: "The Handmaid's Tale", + author: "Margaret Atwood", + release_date: "1985-06-01", + page_count: 311, + }, + { + name: "Starship Troopers", + author: "Robert A. Heinlein", + release_date: "1959-12-01", + page_count: 335, + }, + { + name: "The Left Hand of Darkness", + author: "Ursula K. Le Guin", + release_date: "1969-06-01", + page_count: 304, + }, + { + name: "The Moon is a Harsh Mistress", + author: "Robert A. 
Heinlein", + release_date: "1966-04-01", + page_count: 288, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/d87175daed2327565d4325528c6d8b38.asciidoc b/docs/doc_examples/d87175daed2327565d4325528c6d8b38.asciidoc new file mode 100644 index 000000000..60b8237df --- /dev/null +++ b/docs/doc_examples/d87175daed2327565d4325528c6d8b38.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 0, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d87cfcc0a297f75ffe646b2e61940d14.asciidoc b/docs/doc_examples/d87cfcc0a297f75ffe646b2e61940d14.asciidoc new file mode 100644 index 000000000..4fe177b8f --- /dev/null +++ b/docs/doc_examples/d87cfcc0a297f75ffe646b2e61940d14.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "uppercase_example", + settings: { + analysis: { + analyzer: { + whitespace_uppercase: { + tokenizer: "whitespace", + filter: ["uppercase"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d880630b6f7dc634c4078293f9cd3d80.asciidoc b/docs/doc_examples/d880630b6f7dc634c4078293f9cd3d80.asciidoc new file mode 100644 index 000000000..25e54fdb2 --- /dev/null +++ b/docs/doc_examples/d880630b6f7dc634c4078293f9cd3d80.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + size: 2, + sources: [ + { + date: { + date_histogram: { + field: "timestamp", + calendar_interval: "1d", + order: "desc", + }, + }, + }, + { + product: { + terms: { + field: "product", + order: "asc", + }, + }, + }, + ], + after: { + date: 1494288000000, + product: "mad max", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d88f883ed2fb8be35cd3e72ddffcf4ef.asciidoc b/docs/doc_examples/d88f883ed2fb8be35cd3e72ddffcf4ef.asciidoc new file mode 100644 index 000000000..2c104ebce --- /dev/null +++ b/docs/doc_examples/d88f883ed2fb8be35cd3e72ddffcf4ef.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "length_custom_example", + settings: { + analysis: { + analyzer: { + whitespace_length_2_to_10_char: { + tokenizer: "whitespace", + filter: ["length_2_to_10_char"], + }, + }, + filter: { + length_2_to_10_char: { + type: "length", + min: 2, + max: 10, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d89d36741d906a71eca6c144e8d83889.asciidoc b/docs/doc_examples/d89d36741d906a71eca6c144e8d83889.asciidoc new file mode 100644 index 000000000..d45a6b65d --- /dev/null +++ b/docs/doc_examples/d89d36741d906a71eca6c144e8d83889.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.cancel({ + task_id: "oTUltX4IQMOUUVeiohTt8A:12345", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc b/docs/doc_examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc new file mode 100644 index 000000000..60093c26e --- /dev/null +++ b/docs/doc_examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "name,node*,heap*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8b2a88b5eca99d3691ad3cd40266736.asciidoc b/docs/doc_examples/d8b2a88b5eca99d3691ad3cd40266736.asciidoc deleted file mode 100644 index 3526a8624..000000000 --- a/docs/doc_examples/d8b2a88b5eca99d3691ad3cd40266736.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my-index', - body: { - mappings: { - properties: { - age: { - type: 'integer' - }, - email: { - type: 'keyword' - }, - name: { - type: 'text' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc b/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc new file mode 100644 index 000000000..3875298ba --- /dev/null +++ b/docs/doc_examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.getIpLocationDatabase({ + id: "my-database-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8c401a5b7359ec65947b9f35ecf6927.asciidoc b/docs/doc_examples/d8c401a5b7359ec65947b9f35ecf6927.asciidoc new file mode 100644 index 000000000..402a1ea30 --- /dev/null +++ b/docs/doc_examples/d8c401a5b7359ec65947b9f35ecf6927.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "ngram", + min_gram: 3, + max_gram: 3, + token_chars: ["letter", "digit"], + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "2 Quick Foxes.", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d8ea6a1a1c546bf29f65f8c65439b156.asciidoc b/docs/doc_examples/d8ea6a1a1c546bf29f65f8c65439b156.asciidoc new file mode 100644 index 000000000..f037ca9a4 --- /dev/null +++ b/docs/doc_examples/d8ea6a1a1c546bf29f65f8c65439b156.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "byte-image-index", + mappings: { + properties: { + "byte-image-vector": { + type: "dense_vector", + element_type: "byte", + dims: 2, + }, + title: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d8fa7ca2ec8dbfa034603ea566e33f5b.asciidoc b/docs/doc_examples/d8fa7ca2ec8dbfa034603ea566e33f5b.asciidoc new file mode 100644 index 
000000000..cc29b6058 --- /dev/null +++ b/docs/doc_examples/d8fa7ca2ec8dbfa034603ea566e33f5b.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + filter_path: "aggregations", + aggs: { + the_filter: { + filters: { + keyed: false, + filters: { + "t-shirt": { + term: { + type: "t-shirt", + }, + }, + hat: { + term: { + type: "hat", + }, + }, + }, + }, + aggs: { + avg_price: { + avg: { + field: "price", + }, + }, + sort_by_avg_price: { + bucket_sort: { + sort: { + avg_price: "asc", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d90a84a24a407731dfc1929ac8327746.asciidoc b/docs/doc_examples/d90a84a24a407731dfc1929ac8327746.asciidoc deleted file mode 100644 index 35a5315ac..000000000 --- a/docs/doc_examples/d90a84a24a407731dfc1929ac8327746.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.delete({ - index: 'twitter', - id: '1', - timeout: '5m' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc b/docs/doc_examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc new file mode 100644 index 000000000..d6c59ff90 --- /dev/null +++ b/docs/doc_examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "my-aliases", + template: { + aliases: { + "my-alias": {}, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.putIndexTemplate({ + name: "my-index-template", + index_patterns: ["my-index-*"], + composed_of: ["my-aliases", "my-mappings", "my-settings"], + template: { + aliases: { + "yet-another-alias": {}, + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/d9474f66970c6955e24b17c7447e7b5f.asciidoc b/docs/doc_examples/d9474f66970c6955e24b17c7447e7b5f.asciidoc deleted file mode 100644 index 789b778b3..000000000 --- a/docs/doc_examples/d9474f66970c6955e24b17c7447e7b5f.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - name: { - properties: { - first: { - type: 'text' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/d94f666616dea141dcb7aaf08a35bc10.asciidoc b/docs/doc_examples/d94f666616dea141dcb7aaf08a35bc10.asciidoc new file mode 100644 index 000000000..617d7bfe4 --- /dev/null +++ b/docs/doc_examples/d94f666616dea141dcb7aaf08a35bc10.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "keep_types", + types: ["<NUM>"], + mode: "exclude", + }, + ], + text: "1 quick fox 2 lazy dogs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/d952ac7c73219d8cabc080679e035514.asciidoc
b/docs/doc_examples/d952ac7c73219d8cabc080679e035514.asciidoc new file mode 100644 index 000000000..7e7400f38 --- /dev/null +++ b/docs/doc_examples/d952ac7c73219d8cabc080679e035514.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + knn: { + field: "my_embeddings.predicted_value", + k: 10, + num_candidates: 100, + query_vector_builder: { + text_embedding: { + model_id: "sentence-transformers__msmarco-minilm-l-12-v3", + model_text: "the query string", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d979f934af0992fb8c8596beff80b638.asciidoc b/docs/doc_examples/d979f934af0992fb8c8596beff80b638.asciidoc new file mode 100644 index 000000000..77cbe0562 --- /dev/null +++ b/docs/doc_examples/d979f934af0992fb8c8596beff80b638.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + _source: ["obj1.*", "obj2.*"], + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d983c1ea730eeabac9e914656d7c9be2.asciidoc b/docs/doc_examples/d983c1ea730eeabac9e914656d7c9be2.asciidoc new file mode 100644 index 000000000..dd82fb061 --- /dev/null +++ b/docs/doc_examples/d983c1ea730eeabac9e914656d7c9be2.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "latvian_example", + settings: { + analysis: { + filter: { + latvian_stop: { + type: "stop", + stopwords: "_latvian_", + }, + latvian_keywords: { + type: "keyword_marker", + keywords: ["piemērs"], + }, + latvian_stemmer: { + type: "stemmer", + language: "latvian", + }, + }, + analyzer: { + rebuilt_latvian: { + tokenizer: "standard", + filter: [ + "lowercase", + "latvian_stop", + "latvian_keywords", + "latvian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc b/docs/doc_examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc new file mode 100644 index 000000000..e8c8e1c1e --- /dev/null +++ b/docs/doc_examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + timestamp: { + type: "date", + }, + temperature: { + type: "long", + }, + voltage: { + type: "double", + }, + node: { + type: "keyword", + }, + voltage_corrected: { + type: "double", + on_script_error: "fail", + script: { + source: + "\n emit(doc['voltage'].value * params['multiplier'])\n ", + params: { + multiplier: 4, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d9a1ad1c5746b75972c74dd4d3a3d623.asciidoc b/docs/doc_examples/d9a1ad1c5746b75972c74dd4d3a3d623.asciidoc new file mode 100644 index 000000000..a010647b8 --- /dev/null +++ b/docs/doc_examples/d9a1ad1c5746b75972c74dd4d3a3d623.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_join_field: { + type: "join", + relations: { + question: ["answer", "comment"], + answer: "vote", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d9de409a4a197ce7cbe3714e07155d34.asciidoc b/docs/doc_examples/d9de409a4a197ce7cbe3714e07155d34.asciidoc new file mode 100644 index 000000000..59f9b118c --- /dev/null +++ b/docs/doc_examples/d9de409a4a197ce7cbe3714e07155d34.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + query: { + match: { + body: "foo", + }, + }, + functions: [ + { + script_score: { + script: { + source: "pure_df", + lang: "expert_scripts", + params: { + field: "body", + term: "foo", + }, + }, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/d9e0cba8e150681d861f5fd1545514e2.asciidoc b/docs/doc_examples/d9e0cba8e150681d861f5fd1545514e2.asciidoc new file mode 100644 index 000000000..9c9f58277 --- /dev/null +++ b/docs/doc_examples/d9e0cba8e150681d861f5fd1545514e2.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: + "SELECT YEAR(release_date) AS year FROM library WHERE page_count > ? AND author = ? GROUP BY year HAVING COUNT(*) > ?", + params: [300, "Frank Herbert", 0], +}); +console.log(response); +---- diff --git a/docs/doc_examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc b/docs/doc_examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc new file mode 100644 index 000000000..ed9394a53 --- /dev/null +++ b/docs/doc_examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + match: { + body: "elasticsearch", + }, + }, + should: { + rank_feature: { + field: "pagerank", + saturation: { + pivot: 10, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/da18bae37cda566c0254b30c15221b01.asciidoc b/docs/doc_examples/da18bae37cda566c0254b30c15221b01.asciidoc new file mode 100644 index 000000000..c120c31c8 --- /dev/null +++ b/docs/doc_examples/da18bae37cda566c0254b30c15221b01.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedServiceTokens({ + namespace: "elastic", + service: "fleet-server", + name: "token1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/da24c13eee8c9aeae9a23faf80489e31.asciidoc b/docs/doc_examples/da24c13eee8c9aeae9a23faf80489e31.asciidoc new file mode 100644 index 000000000..a1f77cba3 --- /dev/null +++ b/docs/doc_examples/da24c13eee8c9aeae9a23faf80489e31.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "my-index", +}); +console.log(response); + +const 
response1 = await client.reindex({ + source: { + index: "restored-my-index", + }, + dest: { + index: "my-index", + }, +}); +console.log(response1); + +const response2 = await client.indices.deleteDataStream({ + name: "logs-my_app-default", +}); +console.log(response2); + +const response3 = await client.reindex({ + source: { + index: "restored-logs-my_app-default", + }, + dest: { + index: "logs-my_app-default", + op_type: "create", + }, +}); +console.log(response3); +---- diff --git a/docs/doc_examples/da3f280bc65b581fb3097be768061bee.asciidoc b/docs/doc_examples/da3f280bc65b581fb3097be768061bee.asciidoc new file mode 100644 index 000000000..8297d5687 --- /dev/null +++ b/docs/doc_examples/da3f280bc65b581fb3097be768061bee.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlPrepareAuthentication({ + acs: "https://kibana.org/api/security/saml/callback", +}); +console.log(response); +---- diff --git a/docs/doc_examples/da8db0769dff7305f178c12b1111bc99.asciidoc b/docs/doc_examples/da8db0769dff7305f178c12b1111bc99.asciidoc new file mode 100644 index 000000000..1e6dc21fe --- /dev/null +++ b/docs/doc_examples/da8db0769dff7305f178c12b1111bc99.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + query: "this is a test", + fields: ["subject^3", "message"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/da90e457e2a34fe47dd82a0a2f336095.asciidoc b/docs/doc_examples/da90e457e2a34fe47dd82a0a2f336095.asciidoc new file mode 100644 index 000000000..908eb903c --- /dev/null +++ b/docs/doc_examples/da90e457e2a34fe47dd82a0a2f336095.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "networks", + id: 1, + refresh: "wait_for", + document: { + range: "10.100.0.0/16", + name: "production", + department: "OPS", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc b/docs/doc_examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc new file mode 100644 index 000000000..4e1d675ae --- /dev/null +++ b/docs/doc_examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.exclude._name": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dabb159e0b3456024889fb9754a10655.asciidoc b/docs/doc_examples/dabb159e0b3456024889fb9754a10655.asciidoc new file mode 100644 index 000000000..1323fde55 --- /dev/null +++ b/docs/doc_examples/dabb159e0b3456024889fb9754a10655.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + geometry: { + type: "shape", + }, + }, + }, +}); +console.log(response); +---- diff --git
a/docs/doc_examples/dabcf0bead37cae1d3e5d2813fd3ccfe.asciidoc b/docs/doc_examples/dabcf0bead37cae1d3e5d2813fd3ccfe.asciidoc new file mode 100644 index 000000000..f96585338 --- /dev/null +++ b/docs/doc_examples/dabcf0bead37cae1d3e5d2813fd3ccfe.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + query_string: { + query: 'ip_addr:"2001:db8::/48"', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc b/docs/doc_examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc new file mode 100644 index 000000000..724f4c75b --- /dev/null +++ b/docs/doc_examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc @@ -0,0 +1,82 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "flight_prices", + source: { + index: ["kibana_sample_data_flights"], + }, + dest: { + index: "kibana_sample_flight_prices", + }, + analysis: { + regression: { + dependent_variable: "AvgTicketPrice", + num_top_feature_importance_values: 2, + feature_processors: [ + { + frequency_encoding: { + field: "DestWeather", + feature_name: "DestWeather_frequency", + frequency_map: { + Rain: 0.14604811155570188, + "Heavy Fog": 0.14604811155570188, + "Thunder & Lightning": 0.14604811155570188, + Cloudy: 0.14604811155570188, + "Damaging Wind": 0.14604811155570188, + Hail: 0.14604811155570188, + Sunny: 0.14604811155570188, + Clear: 0.14604811155570188, + }, + }, + }, + { + target_mean_encoding: { + field: "DestWeather", + feature_name: "DestWeather_targetmean", + target_map: { + Rain: 626.5588814585794, + "Heavy Fog": 626.5588814585794, + "Thunder & Lightning": 626.5588814585794, + Hail: 626.5588814585794, + "Damaging Wind": 626.5588814585794, + Cloudy: 626.5588814585794, + Clear: 626.5588814585794, + Sunny: 626.5588814585794, + }, + default_value: 624.0249512020454, + }, + }, + { + one_hot_encoding: { + field: "DestWeather", + hot_map: { + Rain: "DestWeather_Rain", + "Heavy Fog": "DestWeather_Heavy Fog", + "Thunder & Lightning": "DestWeather_Thunder & Lightning", + Cloudy: "DestWeather_Cloudy", + "Damaging Wind": "DestWeather_Damaging Wind", + Hail: "DestWeather_Hail", + Clear: "DestWeather_Clear", + Sunny: "DestWeather_Sunny", + }, + }, + }, + ], + }, + }, + analyzed_fields: { + includes: [ + "AvgTicketPrice", + "Cancelled", + "DestWeather", + "FlightDelayMin", + "DistanceMiles", + ], + }, + model_memory_limit: "30mb", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dad2d4add751fde5c39475ca709cc14b.asciidoc b/docs/doc_examples/dad2d4add751fde5c39475ca709cc14b.asciidoc new file mode 100644 index 000000000..25d579fbc --- /dev/null +++ b/docs/doc_examples/dad2d4add751fde5c39475ca709cc14b.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "test", + settings: { + "index.routing.allocation.include.size": "big,medium", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dadb69a225778ecd6528924c0aa029bb.asciidoc b/docs/doc_examples/dadb69a225778ecd6528924c0aa029bb.asciidoc new file mode 100644 index 000000000..fc4d72b96 --- /dev/null 
+++ b/docs/doc_examples/dadb69a225778ecd6528924c0aa029bb.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "image-index", + mappings: { + properties: { + "image-vector": { + type: "dense_vector", + dims: 3, + similarity: "l2_norm", + }, + "title-vector": { + type: "dense_vector", + dims: 5, + similarity: "l2_norm", + }, + title: { + type: "text", + }, + "file-type": { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dae57cf7df18adb4dc64426eb159733a.asciidoc b/docs/doc_examples/dae57cf7df18adb4dc64426eb159733a.asciidoc new file mode 100644 index 000000000..43ab168fe --- /dev/null +++ b/docs/doc_examples/dae57cf7df18adb4dc64426eb159733a.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + aggs: { + load_time_outlier: { + percentiles: { + field: "load_time", + percents: [95, 99, 99.9], + hdr: { + number_of_significant_value_digits: 3, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/daf5631eba5285f1b929d5d8d8dc0d50.asciidoc b/docs/doc_examples/daf5631eba5285f1b929d5d8d8dc0d50.asciidoc new file mode 100644 index 000000000..228ba4197 --- /dev/null +++ b/docs/doc_examples/daf5631eba5285f1b929d5d8d8dc0d50.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "my_tokenizer", + }, + }, + tokenizer: { + my_tokenizer: { + type: "uax_url_email", + max_token_length: 5, + }, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.indices.analyze({ + index: "my-index-000001", + analyzer: "my_analyzer", + text: "john.smith@global-international.com", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc b/docs/doc_examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc new file mode 100644 index 000000000..8f0e29b61 --- /dev/null +++ b/docs/doc_examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.stopDataFrameAnalytics({ + id: "loganalytics", +}); +console.log(response); +---- diff --git a/docs/doc_examples/db6cba451ba562abe953d09ad80cc15c.asciidoc b/docs/doc_examples/db6cba451ba562abe953d09ad80cc15c.asciidoc deleted file mode 100644 index 364ce69b0..000000000 --- a/docs/doc_examples/db6cba451ba562abe953d09ad80cc15c.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - query: 'city.\\*:(this AND that OR thus)' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/db773f690edf659ac9b044dc854c77eb.asciidoc b/docs/doc_examples/db773f690edf659ac9b044dc854c77eb.asciidoc new file mode 100644 index 
000000000..5a4fd2320 --- /dev/null +++ b/docs/doc_examples/db773f690edf659ac9b044dc854c77eb.asciidoc @@ -0,0 +1,78 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + name: { + type: "keyword", + }, + price: { + type: "long", + }, + included: { + type: "boolean", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: "1", + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + price: 1750, + included: true, + }, + { + index: { + _id: "2", + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + price: 1500, + included: false, + }, + { + index: { + _id: "3", + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + price: 1650, + included: true, + }, + { + index: { + _id: "4", + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Amsterdam Centre for Architecture", + price: 0, + included: true, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/db8710a9793ae0817a45892d33468160.asciidoc b/docs/doc_examples/db8710a9793ae0817a45892d33468160.asciidoc new file mode 100644 index 000000000..28e18c33d --- /dev/null +++ b/docs/doc_examples/db8710a9793ae0817a45892d33468160.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.diskUsage({ + index: "my-index-000001", + run_expensive_tasks: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/db879dcf70abc4a9a14063a9a2d8d6f5.asciidoc b/docs/doc_examples/db879dcf70abc4a9a14063a9a2d8d6f5.asciidoc new file mode 100644 index 000000000..2bf5d8591 --- /dev/null +++ b/docs/doc_examples/db879dcf70abc4a9a14063a9a2d8d6f5.asciidoc @@ -0,0 +1,93 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "museums", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "museums", + refresh: "true", + operations: [ + { + index: { + _id: 1, + }, + }, + { + location: "POINT (4.912350 52.374081)", + name: "NEMO Science Museum", + }, + { + index: { + _id: 2, + }, + }, + { + location: "POINT (4.901618 52.369219)", + name: "Museum Het Rembrandthuis", + }, + { + index: { + _id: 3, + }, + }, + { + location: "POINT (4.914722 52.371667)", + name: "Nederlands Scheepvaartmuseum", + }, + { + index: { + _id: 4, + }, + }, + { + location: "POINT (4.405200 51.222900)", + name: "Letterenhuis", + }, + { + index: { + _id: 5, + }, + }, + { + location: "POINT (2.336389 48.861111)", + name: "Musée du Louvre", + }, + { + index: { + _id: 6, + }, + }, + { + location: "POINT (2.327000 48.860000)", + name: "Musée d'Orsay", + }, + ], +}); +console.log(response1); + +const response2 = await client.search({ + index: "museums", + size: 0, + aggregations: { + "large-grid": { + geohash_grid: { + field: "location", + precision: 3, + }, + }, + }, +}); +console.log(response2); +---- diff --git 
a/docs/doc_examples/db9a8e3edee7c9a96ea0875fd4bbaa69.asciidoc b/docs/doc_examples/db9a8e3edee7c9a96ea0875fd4bbaa69.asciidoc new file mode 100644 index 000000000..8f8803735 --- /dev/null +++ b/docs/doc_examples/db9a8e3edee7c9a96ea0875fd4bbaa69.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings(); +console.log(response); +---- diff --git a/docs/doc_examples/dbc50b8c934171e94604575a8b36f349.asciidoc b/docs/doc_examples/dbc50b8c934171e94604575a8b36f349.asciidoc new file mode 100644 index 000000000..b69c9c9f2 --- /dev/null +++ b/docs/doc_examples/dbc50b8c934171e94604575a8b36f349.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.forcemerge({ + index: "my-index-000001", + max_num_segments: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbcd8892dd01c43d5a60c94173574faf.asciidoc b/docs/doc_examples/dbcd8892dd01c43d5a60c94173574faf.asciidoc new file mode 100644 index 000000000..f4c200d3c --- /dev/null +++ b/docs/doc_examples/dbcd8892dd01c43d5a60c94173574faf.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "range_index", + settings: { + number_of_shards: 2, + }, + mappings: { + properties: { + expected_attendees: { + type: "integer_range", + }, + time_frame: { + type: "date_range", + format: "yyyy-MM-dd||epoch_millis", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "range_index", + id: 1, + refresh: "true", + document: { + expected_attendees: { + gte: 10, + lte: 20, + }, + time_frame: { + gte: "2019-10-28", + lte: "2019-11-04", + }, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/dbd1b930782d34d7396fdb2db1216c0d.asciidoc b/docs/doc_examples/dbd1b930782d34d7396fdb2db1216c0d.asciidoc new file mode 100644 index 000000000..956826f86 --- /dev/null +++ b/docs/doc_examples/dbd1b930782d34d7396fdb2db1216c0d.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + ids: { + values: ["1", "4", "100"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc b/docs/doc_examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc new file mode 100644 index 000000000..2b96106b8 --- /dev/null +++ b/docs/doc_examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "_all", + fields: "*.id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbf93d02ab86a09929a21232b19709cc.asciidoc b/docs/doc_examples/dbf93d02ab86a09929a21232b19709cc.asciidoc new file mode 100644 index 000000000..b7f768ac8 --- /dev/null +++ b/docs/doc_examples/dbf93d02ab86a09929a21232b19709cc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to 
generate the docs examples + +[source, js] +---- +const response = await client.ml.stopTrainedModelDeployment({ + model_id: "my_model_for_search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dbf9abc37899352751dab0ede62af2fd.asciidoc b/docs/doc_examples/dbf9abc37899352751dab0ede62af2fd.asciidoc new file mode 100644 index 000000000..9b11a1f56 --- /dev/null +++ b/docs/doc_examples/dbf9abc37899352751dab0ede62af2fd.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateToken({ + token: + "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dc15e2373e5ecbe09b4ea0858eb63d47.asciidoc b/docs/doc_examples/dc15e2373e5ecbe09b4ea0858eb63d47.asciidoc deleted file mode 100644 index f2d26605b..000000000 --- a/docs/doc_examples/dc15e2373e5ecbe09b4ea0858eb63d47.asciidoc +++ /dev/null @@ -1,38 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - countries: { - terms: { - field: 'artist.country', - order: { - 'rock>playback_stats.avg': 'desc' - } - }, - aggs: { - rock: { - filter: { - term: { - genre: 'rock' - } - }, - aggs: { - playback_stats: { - stats: { - field: 'play_count' - } - } - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/dc33160f4087443f867080a8f5b2cfbd.asciidoc b/docs/doc_examples/dc33160f4087443f867080a8f5b2cfbd.asciidoc new file mode 100644 index 000000000..cb0e48671 --- /dev/null +++ b/docs/doc_examples/dc33160f4087443f867080a8f5b2cfbd.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + format: "json", + query: + "\n FROM library\n | KEEP author, name, page_count, release_date\n | SORT page_count DESC\n | LIMIT 5\n ", + columnar: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dc3b7603e7d688106acb804059af7834.asciidoc b/docs/doc_examples/dc3b7603e7d688106acb804059af7834.asciidoc new file mode 100644 index 000000000..9d2cf81d9 --- /dev/null +++ b/docs/doc_examples/dc3b7603e7d688106acb804059af7834.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + _source: false, + query: { + match: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dc468865da947b4a9136a5b92878d918.asciidoc b/docs/doc_examples/dc468865da947b4a9136a5b92878d918.asciidoc new file mode 100644 index 000000000..30cdb1f6b --- /dev/null +++ b/docs/doc_examples/dc468865da947b4a9136a5b92878d918.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createApiKey({ + name: "my-other-api-key", + metadata: { + application: "my-application", + environment: { + level: 2, + trusted: true, + tags: ["dev", "staging"], + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/dc4dcfeae8a5f248639335c2c9809549.asciidoc b/docs/doc_examples/dc4dcfeae8a5f248639335c2c9809549.asciidoc new file mode 100644 index 000000000..32844b898 --- /dev/null +++ b/docs/doc_examples/dc4dcfeae8a5f248639335c2c9809549.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "path_hierarchy", + text: "/one/two/three", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc b/docs/doc_examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc new file mode 100644 index 000000000..dc8a7c3f0 --- /dev/null +++ b/docs/doc_examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["lowercase"], + char_filter: ["html_strip"], + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc b/docs/doc_examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc new file mode 100644 index 000000000..ff1616147 --- /dev/null +++ b/docs/doc_examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + docs: [ + { + _index: "my-index-000001", + _id: "1", + }, + { + _index: "my-index-000001", + _id: "2", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc b/docs/doc_examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc new file mode 100644 index 000000000..6e3b6b17d --- /dev/null +++ b/docs/doc_examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + "@timestamp": { + format: "strict_date_optional_time||epoch_second", + type: "date", + }, + message: { + type: "wildcard", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc b/docs/doc_examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc new file mode 100644 index 000000000..384d005bd --- /dev/null +++ b/docs/doc_examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + size: 10, + query: { + script_score: { + query: { + knn: { + query_vector: [0.04283529, 0.85670587, -0.51402352, 0], + field: "my_int4_vector", + num_candidates: 20, + }, + }, + script: { + source: "(dotProduct(params.queryVector, 'my_int4_vector') + 1.0)", + params: { + queryVector: [0.04283529, 0.85670587, -0.51402352, 0], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dcfa7f479a33f459a2d222a92e651451.asciidoc b/docs/doc_examples/dcfa7f479a33f459a2d222a92e651451.asciidoc new file mode 100644 index 000000000..1287912d5 --- 
/dev/null +++ b/docs/doc_examples/dcfa7f479a33f459a2d222a92e651451.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "my_admin_role", + description: + "Grants full access to all management features within the cluster.", + cluster: ["all"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["all"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd0b196a099e1cca08c5ce4dd74e935a.asciidoc b/docs/doc_examples/dd0b196a099e1cca08c5ce4dd74e935a.asciidoc new file mode 100644 index 000000000..45ed76e36 --- /dev/null +++ b/docs/doc_examples/dd0b196a099e1cca08c5ce4dd74e935a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "cluster_health_watch", + trigger: { + schedule: { + interval: "10s", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc b/docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc new file mode 100644 index 000000000..c00660b74 --- /dev/null +++ b/docs/doc_examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.updateAliases({ + actions: [ + { + add: { + index: ".reindexed-v9-ml-anomalies-custom-example", + alias: ".ml-anomalies-example1", + filter: { + term: { + job_id: { + value: "example1", + }, + }, + }, + is_hidden: true, + }, + }, + { + add: { + index: ".reindexed-v9-ml-anomalies-custom-example", + alias: ".ml-anomalies-example2", + filter: { + term: { + job_id: { + value: "example2", + }, + }, + }, + is_hidden: true, + }, + }, + { + remove: { + index: ".ml-anomalies-custom-example", + aliases: ".ml-anomalies-*", + }, + }, + { + remove_index: { + index: ".ml-anomalies-custom-example", + }, + }, + { + add: { + index: ".reindexed-v9-ml-anomalies-custom-example", + alias: ".ml-anomalies-custom-example", + is_hidden: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd1a25d821d0c8deaeaa9c8083152a54.asciidoc b/docs/doc_examples/dd1a25d821d0c8deaeaa9c8083152a54.asciidoc new file mode 100644 index 000000000..4114ec4f6 --- /dev/null +++ b/docs/doc_examples/dd1a25d821d0c8deaeaa9c8083152a54.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.processorGrok({ + s: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd3b263e9fa4226e59bedfc957d399d2.asciidoc b/docs/doc_examples/dd3b263e9fa4226e59bedfc957d399d2.asciidoc new file mode 100644 index 000000000..9b43b5804 --- /dev/null +++ b/docs/doc_examples/dd3b263e9fa4226e59bedfc957d399d2.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs 
examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library WHERE release_date < '2000-01-01'", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc b/docs/doc_examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc new file mode 100644 index 000000000..ad777029c --- /dev/null +++ b/docs/doc_examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "standard", + filter: ["lowercase", "my_snow"], + }, + }, + filter: { + my_snow: { + type: "snowball", + language: "English", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc b/docs/doc_examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc new file mode 100644 index 000000000..0ff322366 --- /dev/null +++ b/docs/doc_examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getComponentTemplate(); +console.log(response); +---- diff --git a/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc new file mode 100644 index 000000000..ff630da8a --- /dev/null +++ b/docs/doc_examples/dd71b0c9f9197684ff29c61062c55660.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getSettings(); +console.log(response); +---- diff --git a/docs/doc_examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc b/docs/doc_examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc new file mode 100644 index 000000000..da4ac42b8 --- /dev/null +++ b/docs/doc_examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "mistral_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "mistral_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/dd792bb53703a57f9207e36d16e26255.asciidoc b/docs/doc_examples/dd792bb53703a57f9207e36d16e26255.asciidoc new file mode 100644 index 000000000..9623daabc --- /dev/null +++ b/docs/doc_examples/dd792bb53703a57f9207e36d16e26255.asciidoc @@ -0,0 +1,67 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-index-000001", + refresh: "true", + operations: [ + { + index: {}, + }, + { + timestamp: 1516729294000, + temperature: 200, + voltage: 5.2, + node: "a", + }, + { + index: {}, + }, + { + timestamp: 1516642894000, + temperature: 201, + voltage: 5.8, + node: "b", + }, + { + index: {}, + }, + { + timestamp: 1516556494000, + temperature: 202, + voltage: 5.1, + node: "a", + }, + { + 
index: {}, + }, + { + timestamp: 1516470094000, + temperature: 198, + voltage: 5.6, + node: "b", + }, + { + index: {}, + }, + { + timestamp: 1516383694000, + temperature: 200, + voltage: 4.2, + node: "c", + }, + { + index: {}, + }, + { + timestamp: 1516297294000, + temperature: 202, + voltage: 4, + node: "c", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/dda949d20d07a9edbe64cefc623df945.asciidoc b/docs/doc_examples/dda949d20d07a9edbe64cefc623df945.asciidoc new file mode 100644 index 000000000..695caaf60 --- /dev/null +++ b/docs/doc_examples/dda949d20d07a9edbe64cefc623df945.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my_test_scores", + properties: { + total_score: { + type: "long", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ddcfa47381d47078dbec651e31b69949.asciidoc b/docs/doc_examples/ddcfa47381d47078dbec651e31b69949.asciidoc new file mode 100644 index 000000000..984c274da --- /dev/null +++ b/docs/doc_examples/ddcfa47381d47078dbec651e31b69949.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n library where process.name == "regsvr32.exe" and dll.name == "scrobj.dll"\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc b/docs/doc_examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc new file mode 100644 index 000000000..e8049b2cb --- /dev/null +++ b/docs/doc_examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + query: "FROM mv | EVAL b + 2, a + b | LIMIT 4", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc b/docs/doc_examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc new file mode 100644 index 000000000..eb773d5b0 --- /dev/null +++ b/docs/doc_examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + realm_name: "native1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dde92fdf3469349ffe2c81764333543a.asciidoc b/docs/doc_examples/dde92fdf3469349ffe2c81764333543a.asciidoc new file mode 100644 index 000000000..d4834c92a --- /dev/null +++ b/docs/doc_examples/dde92fdf3469349ffe2c81764333543a.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: "my-index", + dest: "my-new-index", + create_from: { + remove_index_blocks: false, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc b/docs/doc_examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc new file mode 100644 index 000000000..0209a5c00 --- /dev/null +++ b/docs/doc_examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc @@ 
-0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "DELETE", + path: "/_internal/desired_nodes", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc b/docs/doc_examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc new file mode 100644 index 000000000..261dbdf37 --- /dev/null +++ b/docs/doc_examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + slice: { + id: 0, + max: 2, + }, + script: { + source: "ctx._source['extra'] = 'test'", + }, +}); +console.log(response); + +const response1 = await client.updateByQuery({ + index: "my-index-000001", + slice: { + id: 1, + max: 2, + }, + script: { + source: "ctx._source['extra'] = 'test'", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/de139866a220124360e5e27d1a736ea4.asciidoc b/docs/doc_examples/de139866a220124360e5e27d1a736ea4.asciidoc index 3d0a76dd4..1318fe886 100644 --- a/docs/doc_examples/de139866a220124360e5e27d1a736ea4.asciidoc +++ b/docs/doc_examples/de139866a220124360e5e27d1a736ea4.asciidoc @@ -4,30 +4,27 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - term: { - product: 'chocolate' - } + query: { + term: { + product: "chocolate", }, - sort: [ - { - 'offer.price': { - mode: 'avg', - order: 'asc', - nested: { - path: 'offer', - filter: { - term: { - 'offer.color': 'blue' - } - } - } - } - } - ] - } -}) -console.log(response) + }, + sort: [ + { + "offer.price": { + mode: "avg", + order: "asc", + nested: { + path: "offer", + filter: { + term: { + "offer.color": "blue", + }, + }, + }, + }, + }, + ], +}); +console.log(response); ---- - diff --git a/docs/doc_examples/de176bc4788ea286fff9e92418a43ea8.asciidoc b/docs/doc_examples/de176bc4788ea286fff9e92418a43ea8.asciidoc deleted file mode 100644 index 995280ef7..000000000 --- a/docs/doc_examples/de176bc4788ea286fff9e92418a43ea8.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response0 = await client.indices.create({ - index: 'test' -}) -console.log(response0) - -const response1 = await client.indices.create({ - index: 'test_2' -}) -console.log(response1) - -const response2 = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test_2', - alias: 'test' - } - }, - { - remove_index: { - index: 'test' - } - } - ] - } -}) -console.log(response2) ----- - diff --git a/docs/doc_examples/de2f59887737de3a27716177b60393a2.asciidoc b/docs/doc_examples/de2f59887737de3a27716177b60393a2.asciidoc new file mode 100644 index 000000000..e93b1c272 --- /dev/null +++ b/docs/doc_examples/de2f59887737de3a27716177b60393a2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "analyze_sample", + field: "obj1.field1", + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/de876505acc75d371d1f6f484c449197.asciidoc 
b/docs/doc_examples/de876505acc75d371d1f6f484c449197.asciidoc new file mode 100644 index 000000000..373b13fdb --- /dev/null +++ b/docs/doc_examples/de876505acc75d371d1f6f484c449197.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + settings: { + "index.write.wait_for_active_shards": "2", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc b/docs/doc_examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc new file mode 100644 index 000000000..6cb361f3e --- /dev/null +++ b/docs/doc_examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryApiKeys({ + with_limited_by: "true", + query: { + ids: { + values: ["VuaCfGcBCdbkQm-e5aOx"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dea22bb4997e368950f0fc80f2a5f304.asciidoc b/docs/doc_examples/dea22bb4997e368950f0fc80f2a5f304.asciidoc new file mode 100644 index 000000000..789ac12d6 --- /dev/null +++ b/docs/doc_examples/dea22bb4997e368950f0fc80f2a5f304.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "my-index-000001", + fields: "employee-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dea4ac54c63a10c62eccd7b7f6543b86.asciidoc b/docs/doc_examples/dea4ac54c63a10c62eccd7b7f6543b86.asciidoc new file mode 100644 index 000000000..3ae915d08 --- /dev/null +++ b/docs/doc_examples/dea4ac54c63a10c62eccd7b7f6543b86.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "place", + id: 1, + document: { + suggest: { + input: ["timmy's", "starbucks", "dunkin donuts"], + contexts: { + place_type: ["cafe", "food"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dead0682932ea6ec33c1197017bcb209.asciidoc b/docs/doc_examples/dead0682932ea6ec33c1197017bcb209.asciidoc new file mode 100644 index 000000000..0cfd8d93c --- /dev/null +++ b/docs/doc_examples/dead0682932ea6ec33c1197017bcb209.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: "dr5r9ydj2y73", + bottom_right: "drj7teegpus6", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dec2af498a7e5892e8fcd09ae779c8f0.asciidoc b/docs/doc_examples/dec2af498a7e5892e8fcd09ae779c8f0.asciidoc new file mode 100644 index 000000000..e5d6dd96c --- /dev/null +++ b/docs/doc_examples/dec2af498a7e5892e8fcd09ae779c8f0.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: 
"ip_addresses", + size: 0, + aggs: { + ip_ranges: { + ip_range: { + field: "ip", + ranges: [ + { + mask: "10.0.0.0/25", + }, + { + mask: "10.0.0.127/25", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dee3023098d9e63aa9e113beea5686da.asciidoc b/docs/doc_examples/dee3023098d9e63aa9e113beea5686da.asciidoc new file mode 100644 index 000000000..cf5b294bf --- /dev/null +++ b/docs/doc_examples/dee3023098d9e63aa9e113beea5686da.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "knn": {\n "field": "{{knn_field}}",\n "query_vector": {{#toJson}}query_vector{{/toJson}},\n "k": "{{k}}",\n "num_candidates": {{num_candidates}}\n },\n "fields": {{#toJson}}fields{{/toJson}}\n }\n ', + params: { + knn_field: "image-vector", + query_vector: [], + k: 10, + num_candidates: 100, + fields: ["title", "file-type"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc b/docs/doc_examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc new file mode 100644 index 000000000..3d7b78f05 --- /dev/null +++ b/docs/doc_examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "heap.max", +}); +console.log(response); +---- diff --git a/docs/doc_examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc b/docs/doc_examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc new file mode 100644 index 000000000..fcf3cf69f --- /dev/null +++ b/docs/doc_examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + analysis: { + analyzer: { + my_synonyms: { + tokenizer: "whitespace", + filter: ["synonym"], + }, + }, + filter: { + synonym: { + type: "synonym_graph", + synonyms_path: "analysis/synonym.txt", + updateable: true, + }, + }, + }, + }, + }, + mappings: { + properties: { + text: { + type: "text", + analyzer: "standard", + search_analyzer: "my_synonyms", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/df103a3df9b353357e72f9180ef421a1.asciidoc b/docs/doc_examples/df103a3df9b353357e72f9180ef421a1.asciidoc new file mode 100644 index 000000000..af7aa30b7 --- /dev/null +++ b/docs/doc_examples/df103a3df9b353357e72f9180ef421a1.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + include: "swi*", + exclude: "electro*", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/df1336e768fb6fc1826a5afa30a57285.asciidoc b/docs/doc_examples/df1336e768fb6fc1826a5afa30a57285.asciidoc new file mode 100644 index 000000000..7d322eefb --- /dev/null +++ 
b/docs/doc_examples/df1336e768fb6fc1826a5afa30a57285.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-data-stream", + document: { + "@timestamp": "2099-03-08T11:06:07.000Z", + user: { + id: "8a4f500d", + }, + message: "Login successful", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/df17f920b0deab3529b98df88b781f55.asciidoc b/docs/doc_examples/df17f920b0deab3529b98df88b781f55.asciidoc deleted file mode 100644 index b3e06489a..000000000 --- a/docs/doc_examples/df17f920b0deab3529b98df88b781f55.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - functions: [ - { - gauss: { - price: { - origin: '0', - scale: '20' - } - } - }, - { - gauss: { - location: { - origin: '11, 12', - scale: '2km' - } - } - } - ], - query: { - match: { - properties: 'balcony' - } - }, - score_mode: 'multiply' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/df34c8ebaaa59a3ee0e3f28e2443bc30.asciidoc b/docs/doc_examples/df34c8ebaaa59a3ee0e3f28e2443bc30.asciidoc new file mode 100644 index 000000000..0c820eea2 --- /dev/null +++ b/docs/doc_examples/df34c8ebaaa59a3ee0e3f28e2443bc30.asciidoc @@ -0,0 +1,83 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + properties: { + comments: { + type: "nested", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index", + id: 1, + refresh: "true", + document: { + comments: [ + { + author: "kimchy", + }, + ], + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index", + id: 2, + refresh: "true", + document: { + comments: [ + { + author: "kimchy", + }, + { + author: "nik9000", + }, + ], + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index", + id: 3, + refresh: "true", + document: { + comments: [ + { + author: "nik9000", + }, + ], + }, +}); +console.log(response3); + +const response4 = await client.search({ + index: "my-index", + query: { + nested: { + path: "comments", + query: { + bool: { + must_not: [ + { + term: { + "comments.author": "nik9000", + }, + }, + ], + }, + }, + }, + }, +}); +console.log(response4); +---- diff --git a/docs/doc_examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc b/docs/doc_examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc new file mode 100644 index 000000000..d6506f70e --- /dev/null +++ b/docs/doc_examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.ackWatch({ + watch_id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc b/docs/doc_examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc new file mode 100644 index 000000000..924b9da1c --- /dev/null +++ b/docs/doc_examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, 
DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool({ + thread_pool_patterns: "generic", + v: "true", + h: "id,name,active,rejected,completed", +}); +console.log(response); +---- diff --git a/docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc b/docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc new file mode 100644 index 000000000..d4a4521d5 --- /dev/null +++ b/docs/doc_examples/df81b88a2192dd6f9912e0c948a44487.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "sparse_embedding", + inference_id: "elser_embeddings", + inference_config: { + service: "elasticsearch", + service_settings: { + num_allocations: 1, + num_threads: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/df82a9cb21a7557f3ddba2509f76f608.asciidoc b/docs/doc_examples/df82a9cb21a7557f3ddba2509f76f608.asciidoc new file mode 100644 index 000000000..de3179aa1 --- /dev/null +++ b/docs/doc_examples/df82a9cb21a7557f3ddba2509f76f608.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["fingerprint"], + text: "zebra jumps over resting resting dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfa16b7300d225e013f23625f44c087b.asciidoc b/docs/doc_examples/dfa16b7300d225e013f23625f44c087b.asciidoc new file mode 100644 index 000000000..90ef0a673 --- /dev/null +++ b/docs/doc_examples/dfa16b7300d225e013f23625f44c087b.asciidoc @@ -0,0 +1,65 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + settings: { + number_of_shards: 1, + similarity: { + scripted_tfidf: { + type: "scripted", + script: { + source: + "double tf = Math.sqrt(doc.freq); double idf = Math.log((field.docCount+1.0)/(term.docFreq+1.0)) + 1.0; double norm = 1/Math.sqrt(doc.length); return query.boost * tf * idf * norm;", + }, + }, + }, + }, + mappings: { + properties: { + field: { + type: "text", + similarity: "scripted_tfidf", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "index", + id: 1, + document: { + field: "foo bar foo", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "index", + id: 2, + document: { + field: "bar baz", + }, +}); +console.log(response2); + +const response3 = await client.indices.refresh({ + index: "index", +}); +console.log(response3); + +const response4 = await client.search({ + index: "index", + explain: "true", + query: { + query_string: { + query: "foo^1.7", + default_field: "field", + }, + }, +}); +console.log(response4); +---- diff --git a/docs/doc_examples/dfa75000edf4b960ed9002595a051871.asciidoc b/docs/doc_examples/dfa75000edf4b960ed9002595a051871.asciidoc new file mode 100644 index 000000000..8e62c4d26 --- /dev/null +++ b/docs/doc_examples/dfa75000edf4b960ed9002595a051871.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const 
response = await client.ilm.stop(); +console.log(response); +---- diff --git a/docs/doc_examples/dfac8d098b50aa0181161bcd17b38ef4.asciidoc b/docs/doc_examples/dfac8d098b50aa0181161bcd17b38ef4.asciidoc deleted file mode 100644 index 614fea4b9..000000000 --- a/docs/doc_examples/dfac8d098b50aa0181161bcd17b38ef4.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.putSettings({ - index: 'twitter', - body: { - index: { - refresh_interval: '-1' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/dfb1fe96d806a644214d06f9b4b87878.asciidoc b/docs/doc_examples/dfb1fe96d806a644214d06f9b4b87878.asciidoc deleted file mode 100644 index 3cc9e1ed8..000000000 --- a/docs/doc_examples/dfb1fe96d806a644214d06f9b4b87878.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.deleteByQuery({ - index: 'twitter', - scroll_size: '5000', - body: { - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/dfb20907cfc5ac520ea3b1dba5f00811.asciidoc b/docs/doc_examples/dfb20907cfc5ac520ea3b1dba5f00811.asciidoc new file mode 100644 index 000000000..04facbcb5 --- /dev/null +++ b/docs/doc_examples/dfb20907cfc5ac520ea3b1dba5f00811.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: ".watcher-history*", + sort: [ + { + "result.execution_time": "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfb641d2d3155669ad6fb5a424dabf4f.asciidoc b/docs/doc_examples/dfb641d2d3155669ad6fb5a424dabf4f.asciidoc new file mode 100644 index 000000000..aca2d7530 --- /dev/null +++ b/docs/doc_examples/dfb641d2d3155669ad6fb5a424dabf4f.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.getStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/dfbf53781adc6640493d49931a352167.asciidoc b/docs/doc_examples/dfbf53781adc6640493d49931a352167.asciidoc new file mode 100644 index 000000000..94e9cf226 --- /dev/null +++ b/docs/doc_examples/dfbf53781adc6640493d49931a352167.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + enabled: false, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "session_1", + document: { + user_id: "kimchy", + session_data: { + arbitrary_object: { + some_array: [ + "foo", + "bar", + { + baz: 2, + }, + ], + }, + }, + last_updated: "2015-12-06T18:20:22", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "session_1", +}); +console.log(response2); + +const response3 = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response3); +---- diff --git a/docs/doc_examples/dfcc83efefaddccfe5dce0695c2266ef.asciidoc 
b/docs/doc_examples/dfcc83efefaddccfe5dce0695c2266ef.asciidoc new file mode 100644 index 000000000..1486e6f12 --- /dev/null +++ b/docs/doc_examples/dfcc83efefaddccfe5dce0695c2266ef.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + obj1: { + type: "nested", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc b/docs/doc_examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc new file mode 100644 index 000000000..fd702383f --- /dev/null +++ b/docs/doc_examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "PUT", + path: "/_internal/desired_nodes/Ywkh3INLQcuPT49f6kcppA/101", + body: { + nodes: [ + { + settings: { + "node.name": "instance-000187", + "node.external_id": "instance-000187", + "node.roles": ["data_hot", "master"], + "node.attr.data": "hot", + "node.attr.logical_availability_zone": "zone-0", + }, + processors_range: { + min: 8, + max: 10, + }, + memory: "58gb", + storage: "2tb", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc b/docs/doc_examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc new file mode 100644 index 000000000..c46ec2537 --- /dev/null +++ b/docs/doc_examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.updateTrainedModelDeployment({ + model_id: "elastic__distilbert-base-uncased-finetuned-conll03-english", + adaptive_allocations: { + enabled: true, + min_number_of_allocations: 3, + max_number_of_allocations: 10, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfdf82b8d99436582f150117695190b3.asciidoc b/docs/doc_examples/dfdf82b8d99436582f150117695190b3.asciidoc new file mode 100644 index 000000000..8d4d07d06 --- /dev/null +++ b/docs/doc_examples/dfdf82b8d99436582f150117695190b3.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "child_example", + id: 1, + document: { + join: { + name: "question", + }, + body: "I have Windows 2003 server and i bought a new Windows 2008 server...", + title: + "Whats the best way to file transfer my site from server to a newer one?", + tags: ["windows-server-2003", "windows-server-2008", "file-transfer"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/dfef545b1e2c247bafd1347e8e807ac1.asciidoc b/docs/doc_examples/dfef545b1e2c247bafd1347e8e807ac1.asciidoc deleted file mode 100644 index e05ce2602..000000000 --- a/docs/doc_examples/dfef545b1e2c247bafd1347e8e807ac1.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'test', - body: { - settings: { - number_of_shards: 1 - }, - 
mappings: { - properties: { - field1: { - type: 'text' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc b/docs/doc_examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc new file mode 100644 index 000000000..f3a92dcab --- /dev/null +++ b/docs/doc_examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.deletePipeline({ + id: "my-pipeline-id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/dffbbdc4025e5777c647d8818847b960.asciidoc b/docs/doc_examples/dffbbdc4025e5777c647d8818847b960.asciidoc new file mode 100644 index 000000000..d6249f4cf --- /dev/null +++ b/docs/doc_examples/dffbbdc4025e5777c647d8818847b960.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", + owner: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc b/docs/doc_examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc new file mode 100644 index 000000000..153d1a4ff --- /dev/null +++ b/docs/doc_examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.postCalendarEvents({ + calendar_id: "dst-germany", + events: [ + { + description: "Fall 2024", + start_time: 1729994400000, + end_time: 1730167200000, + skip_result: false, + skip_model_update: false, + force_time_shift: -3600, + }, + { + description: "Spring 2025", + start_time: 1743296400000, + end_time: 1743469200000, + skip_result: false, + skip_model_update: false, + force_time_shift: 3600, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e04267ffc50d916800b919c6cdc9622a.asciidoc b/docs/doc_examples/e04267ffc50d916800b919c6cdc9622a.asciidoc new file mode 100644 index 000000000..5893b52d1 --- /dev/null +++ b/docs/doc_examples/e04267ffc50d916800b919c6cdc9622a.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.mapping.ignore_above": 256, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e0734215054e1ff5df712ce3a826cdba.asciidoc b/docs/doc_examples/e0734215054e1ff5df712ce3a826cdba.asciidoc new file mode 100644 index 000000000..926699d94 --- /dev/null +++ b/docs/doc_examples/e0734215054e1ff5df712ce3a826cdba.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "my-index", +}); +console.log(response); + +const response1 = await client.indices.deleteDataStream({ + name: "logs-my_app-default", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e08fb1435dc659c24badf25b676efb68.asciidoc b/docs/doc_examples/e08fb1435dc659c24badf25b676efb68.asciidoc new file mode 100644 index 000000000..0a05a4f32 --- /dev/null +++ 
b/docs/doc_examples/e08fb1435dc659c24badf25b676efb68.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + body_text: { + type: "text", + index_prefixes: {}, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e095fc96504efecc588f97673912e3d3.asciidoc b/docs/doc_examples/e095fc96504efecc588f97673912e3d3.asciidoc new file mode 100644 index 000000000..a4c8438a0 --- /dev/null +++ b/docs/doc_examples/e095fc96504efecc588f97673912e3d3.asciidoc @@ -0,0 +1,54 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putJob({ + job_id: "test-job1", + pretty: "true", + analysis_config: { + bucket_span: "15m", + detectors: [ + { + detector_description: "Sum of bytes", + function: "sum", + field_name: "bytes", + }, + ], + }, + data_description: { + time_field: "timestamp", + time_format: "epoch_ms", + }, + analysis_limits: { + model_memory_limit: "11MB", + }, + model_plot_config: { + enabled: true, + annotations_enabled: true, + }, + results_index_name: "test-job1", + datafeed_config: { + indices: ["kibana_sample_data_logs"], + query: { + bool: { + must: [ + { + match_all: {}, + }, + ], + }, + }, + runtime_mappings: { + hour_of_day: { + type: "long", + script: { + source: "emit(doc['timestamp'].value.getHour());", + }, + }, + }, + datafeed_id: "datafeed-test-job1", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e09d30195108bd6a1f6857394a6123ea.asciidoc b/docs/doc_examples/e09d30195108bd6a1f6857394a6123ea.asciidoc new file mode 100644 index 000000000..b6d3ff1e0 --- /dev/null +++ b/docs/doc_examples/e09d30195108bd6a1f6857394a6123ea.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["reverse"], + text: "quick fox jumps", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e09ee13ce253c7892dd5ef076fbfbba5.asciidoc b/docs/doc_examples/e09ee13ce253c7892dd5ef076fbfbba5.asciidoc new file mode 100644 index 000000000..69b1d067e --- /dev/null +++ b/docs/doc_examples/e09ee13ce253c7892dd5ef076fbfbba5.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_analyzer: { + tokenizer: "standard", + filter: ["keyword_repeat", "stemmer", "remove_duplicates"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc b/docs/doc_examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc new file mode 100644 index 000000000..fa26514b1 --- /dev/null +++ b/docs/doc_examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + slice: { + id: 0, + max: 2, + }, + }, + dest: { + index: 
"my-new-index-000001", + }, +}); +console.log(response); + +const response1 = await client.reindex({ + source: { + index: "my-index-000001", + slice: { + id: 1, + max: 2, + }, + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc b/docs/doc_examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc new file mode 100644 index 000000000..cc38f9896 --- /dev/null +++ b/docs/doc_examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.stats({ + index: "index1,index2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc b/docs/doc_examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc new file mode 100644 index 000000000..552adb14c --- /dev/null +++ b/docs/doc_examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.fielddata({ + v: "true", + fields: "body", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e0d4a800de2d8f4062e69433586c38db.asciidoc b/docs/doc_examples/e0d4a800de2d8f4062e69433586c38db.asciidoc new file mode 100644 index 000000000..ea1945982 --- /dev/null +++ b/docs/doc_examples/e0d4a800de2d8f4062e69433586c38db.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.samlCompleteLogout({ + realm: "saml1", + ids: ["_1c368075e0b3..."], + query_string: + "SAMLResponse=fZHLasMwEEVbfb1bf...&SigAlg=http%3A%2F%2Fwww.w3.org%2F2000%2F09%2Fxmldsig%23rsa-sha1&Signature=CuCmFn%2BLqnaZGZJqK...", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e0d6e02b998bdea99c9c08dcc3630c5e.asciidoc b/docs/doc_examples/e0d6e02b998bdea99c9c08dcc3630c5e.asciidoc deleted file mode 100644 index f3f0bb5cc..000000000 --- a/docs/doc_examples/e0d6e02b998bdea99c9c08dcc3630c5e.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: { - query: 'this is a test' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e0fcef99656799de6b88117d56f131e2.asciidoc b/docs/doc_examples/e0fcef99656799de6b88117d56f131e2.asciidoc new file mode 100644 index 000000000..03c15eafc --- /dev/null +++ b/docs/doc_examples/e0fcef99656799de6b88117d56f131e2.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.explain({ + index: "my-index-000001", + id: 0, + query: { + match: { + message: "elasticsearch", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1220f2c28db6ef0233e26e6bd3866fa.asciidoc b/docs/doc_examples/e1220f2c28db6ef0233e26e6bd3866fa.asciidoc new file mode 100644 index 000000000..6f338681d --- /dev/null +++ b/docs/doc_examples/e1220f2c28db6ef0233e26e6bd3866fa.asciidoc @@ -0,0 +1,44 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + top_tags: { + terms: { + field: "type", + size: 3, + }, + aggs: { + top_sales_hits: { + top_hits: { + sort: [ + { + date: { + order: "desc", + }, + }, + ], + _source: { + includes: ["date", "price"], + }, + size: 1, + }, + }, + "having.top_salary": { + bucket_selector: { + buckets_path: { + tp: "top_sales_hits[_source.price]", + }, + script: "params.tp < 180", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e12f2d2ddca387630e7855a6db952da2.asciidoc b/docs/doc_examples/e12f2d2ddca387630e7855a6db952da2.asciidoc new file mode 100644 index 000000000..5672d4c40 --- /dev/null +++ b/docs/doc_examples/e12f2d2ddca387630e7855a6db952da2.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + runtime_mappings: { + "price.euros": { + type: "double", + script: { + source: + "\n emit(doc['price'].value * params.conversion_rate)\n ", + params: { + conversion_rate: 0.835526591, + }, + }, + }, + }, + aggs: { + price_ranges: { + range: { + field: "price.euros", + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 200, + }, + { + from: 200, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc b/docs/doc_examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc new file mode 100644 index 000000000..44211f46b --- /dev/null +++ b/docs/doc_examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.getToken({ + grant_type: "password", + username: "test_admin", + password: "x-pack-test-password", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc b/docs/doc_examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc new file mode 100644 index 000000000..627606897 --- /dev/null +++ b/docs/doc_examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "breaker", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e16a353e619b935c5c70769b1b9fa100.asciidoc b/docs/doc_examples/e16a353e619b935c5c70769b1b9fa100.asciidoc new file mode 100644 index 000000000..2e4b4802e --- /dev/null +++ b/docs/doc_examples/e16a353e619b935c5c70769b1b9fa100.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + aggs: { + my_buckets: { + composite: { + sources: [ + { + tile: { + geotile_grid: { + field: "location", + precision: 8, + }, + }, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e17e8852ec3f31781e1364f4dffeb6d0.asciidoc b/docs/doc_examples/e17e8852ec3f31781e1364f4dffeb6d0.asciidoc deleted file mode 100644 index 6d97c3961..000000000 --- 
a/docs/doc_examples/e17e8852ec3f31781e1364f4dffeb6d0.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - query: '(content:this OR name:this) AND (content:that OR name:that)' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e1874cc7cd22b6860ca8b11bde3c70c1.asciidoc b/docs/doc_examples/e1874cc7cd22b6860ca8b11bde3c70c1.asciidoc new file mode 100644 index 000000000..ae53bc087 --- /dev/null +++ b/docs/doc_examples/e1874cc7cd22b6860ca8b11bde3c70c1.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index2", + query: { + query_string: { + query: "running with scissors", + fields: ["comment", "comment.english"], + }, + }, + highlight: { + order: "score", + fields: { + comment: { + type: "fvh", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e194e9cbe3eb2305f4f7cdda0cf529bd.asciidoc b/docs/doc_examples/e194e9cbe3eb2305f4f7cdda0cf529bd.asciidoc new file mode 100644 index 000000000..75b53db1d --- /dev/null +++ b/docs/doc_examples/e194e9cbe3eb2305f4f7cdda0cf529bd.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + typed_keys: "true", + suggest: { + text: "some test mssage", + "my-first-suggester": { + term: { + field: "message", + }, + }, + "my-second-suggester": { + phrase: { + field: "message", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e19f5e3724d9f3f36a817b9a811ca42e.asciidoc b/docs/doc_examples/e19f5e3724d9f3f36a817b9a811ca42e.asciidoc new file mode 100644 index 000000000..26bc23dc8 --- /dev/null +++ b/docs/doc_examples/e19f5e3724d9f3f36a817b9a811ca42e.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + my_date_histo: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + the_sum: { + sum: { + field: "lemmings", + }, + }, + the_deriv: { + derivative: { + buckets_path: "the_sum", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc b/docs/doc_examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc new file mode 100644 index 000000000..75701ade6 --- /dev/null +++ b/docs/doc_examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + bool: { + should: [ + { + percolate: { + field: "query", + document: { + message: "bonsai tree", + }, + name: "query1", + }, + }, + { + percolate: { + field: "query", + document: { + message: "tulip flower", + }, + name: "query2", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc b/docs/doc_examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc new file 
mode 100644 index 000000000..2d21ab128 --- /dev/null +++ b/docs/doc_examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1f20ee96ce80edcc35b647cef731e15.asciidoc b/docs/doc_examples/e1f20ee96ce80edcc35b647cef731e15.asciidoc new file mode 100644 index 000000000..89f63d5b9 --- /dev/null +++ b/docs/doc_examples/e1f20ee96ce80edcc35b647cef731e15.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "user_lookup", + document: { + email: "mardy.brown@asciidocsmith.com", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e1f6ea7c0937cf7e6ea7e8209e52e8bb.asciidoc b/docs/doc_examples/e1f6ea7c0937cf7e6ea7e8209e52e8bb.asciidoc new file mode 100644 index 000000000..29722ee03 --- /dev/null +++ b/docs/doc_examples/e1f6ea7c0937cf7e6ea7e8209e52e8bb.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "events", + size: 10, + sort: [ + { + timestamp: "desc", + }, + ], + track_total_hits: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e21e1c26dc8687e7bf7bd2bf019a6698.asciidoc b/docs/doc_examples/e21e1c26dc8687e7bf7bd2bf019a6698.asciidoc deleted file mode 100644 index 8517fa362..000000000 --- a/docs/doc_examples/e21e1c26dc8687e7bf7bd2bf019a6698.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.deleteByQuery({ - index: 'twitter', - conflicts: 'proceed', - body: { - query: { - match_all: {} - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e22a1da3c622611be6855e534c0709ae.asciidoc b/docs/doc_examples/e22a1da3c622611be6855e534c0709ae.asciidoc new file mode 100644 index 000000000..26c4b9b0a --- /dev/null +++ b/docs/doc_examples/e22a1da3c622611be6855e534c0709ae.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_query_rules/my-ruleset/_test", + body: { + match_criteria: { + query_string: "puggles", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e26c96978096ccc592849cca9db67ffc.asciidoc b/docs/doc_examples/e26c96978096ccc592849cca9db67ffc.asciidoc new file mode 100644 index 000000000..f01887bd3 --- /dev/null +++ b/docs/doc_examples/e26c96978096ccc592849cca9db67ffc.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + "index.requests.cache.enable": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e26e8bfa68aa4ab265b22304c38c3aef.asciidoc 
b/docs/doc_examples/e26e8bfa68aa4ab265b22304c38c3aef.asciidoc new file mode 100644 index 000000000..ab6b687b0 --- /dev/null +++ b/docs/doc_examples/e26e8bfa68aa4ab265b22304c38c3aef.asciidoc @@ -0,0 +1,90 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "sample_data", + mappings: { + properties: { + client_ip: { + type: "ip", + }, + message: { + type: "keyword", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "sample_data", + operations: [ + { + index: {}, + }, + { + "@timestamp": "2023-10-23T12:15:03.360Z", + client_ip: "172.21.2.162", + message: "Connected to 10.1.0.3", + event_duration: 3450233, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T12:27:28.948Z", + client_ip: "172.21.2.113", + message: "Connected to 10.1.0.2", + event_duration: 2764889, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T13:33:34.937Z", + client_ip: "172.21.0.5", + message: "Disconnected", + event_duration: 1232382, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T13:51:54.732Z", + client_ip: "172.21.3.15", + message: "Connection error", + event_duration: 725448, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T13:52:55.015Z", + client_ip: "172.21.3.15", + message: "Connection error", + event_duration: 8268153, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T13:53:55.832Z", + client_ip: "172.21.3.15", + message: "Connection error", + event_duration: 5033755, + }, + { + index: {}, + }, + { + "@timestamp": "2023-10-23T13:55:01.543Z", + client_ip: "172.21.3.15", + message: "Connected to 10.1.0.1", + event_duration: 1756467, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e270f3f721a5712cd11a5ca03554f5b0.asciidoc b/docs/doc_examples/e270f3f721a5712cd11a5ca03554f5b0.asciidoc index 74cdf053b..c5ce59e19 100644 --- a/docs/doc_examples/e270f3f721a5712cd11a5ca03554f5b0.asciidoc +++ b/docs/doc_examples/e270f3f721a5712cd11a5ca03554f5b0.asciidoc @@ -4,20 +4,14 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'Will Smith', - type: 'best_fields', - fields: [ - 'first_name', - 'last_name' - ], - operator: 'and' - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "Will Smith", + type: "best_fields", + fields: ["first_name", "last_name"], + operator: "and", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/e273060a675c959fd5f3cde27c8aff07.asciidoc b/docs/doc_examples/e273060a675c959fd5f3cde27c8aff07.asciidoc new file mode 100644 index 000000000..fb7327c66 --- /dev/null +++ b/docs/doc_examples/e273060a675c959fd5f3cde27c8aff07.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "index", + mappings: { + properties: { + foo: { + type: "integer", + index: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2750d69bcb6d4c7e16e704cd0fb3530.asciidoc b/docs/doc_examples/e2750d69bcb6d4c7e16e704cd0fb3530.asciidoc new file mode 100644 index 000000000..22ace2fd1 --- /dev/null +++ b/docs/doc_examples/e2750d69bcb6d4c7e16e704cd0fb3530.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` 
to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + mappings: { + properties: { + pagerank: { + type: "rank_feature", + }, + url_length: { + type: "rank_feature", + positive_score_impact: false, + }, + topics: { + type: "rank_features", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2883c88b5ceca9fce1e70e716d80025.asciidoc b/docs/doc_examples/e2883c88b5ceca9fce1e70e716d80025.asciidoc new file mode 100644 index 000000000..d08785c3f --- /dev/null +++ b/docs/doc_examples/e2883c88b5ceca9fce1e70e716d80025.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_version: { + type: "version", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2a042c629429855c3bcaefffb26b7fa.asciidoc b/docs/doc_examples/e2a042c629429855c3bcaefffb26b7fa.asciidoc deleted file mode 100644 index 9021864be..000000000 --- a/docs/doc_examples/e2a042c629429855c3bcaefffb26b7fa.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - date: { - type: 'date', - format: 'yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc b/docs/doc_examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc new file mode 100644 index 000000000..7ed084f76 --- /dev/null +++ b/docs/doc_examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + intervals: { + my_text: { + match: { + query: "salty", + filter: { + contained_by: { + match: { + query: "hot porridge", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2a753029b450942a3228e3003a55a7d.asciidoc b/docs/doc_examples/e2a753029b450942a3228e3003a55a7d.asciidoc new file mode 100644 index 000000000..e8feda761 --- /dev/null +++ b/docs/doc_examples/e2a753029b450942a3228e3003a55a7d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putDataLifecycle({ + name: "my-weather-sensor-data-stream", + downsampling: [ + { + after: "1d", + fixed_interval: "10m", + }, + { + after: "7d", + fixed_interval: "1d", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2a7d127b82ddebb690a959dcd0cbc09.asciidoc b/docs/doc_examples/e2a7d127b82ddebb690a959dcd0cbc09.asciidoc new file mode 100644 index 000000000..498327da9 --- /dev/null +++ b/docs/doc_examples/e2a7d127b82ddebb690a959dcd0cbc09.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "elision_example", + settings: { + analysis: { + analyzer: { + 
whitespace_elision: { + tokenizer: "whitespace", + filter: ["elision"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2b4867a9f72bda87ebaa3608d3fba4c.asciidoc b/docs/doc_examples/e2b4867a9f72bda87ebaa3608d3fba4c.asciidoc new file mode 100644 index 000000000..fb0c3c9ac --- /dev/null +++ b/docs/doc_examples/e2b4867a9f72bda87ebaa3608d3fba4c.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + range: { + "user.effective.date": { + gte: "{{date.min}}", + lte: "{{date.max}}", + format: + "{{#join delimiter='||'}}date.formats{{/join delimiter='||'}}", + }, + }, + }, + }, + params: { + date: { + min: "2098", + max: "06/05/2099", + formats: ["dd/MM/yyyy", "yyyy"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc b/docs/doc_examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc new file mode 100644 index 000000000..4f8760f0e --- /dev/null +++ b/docs/doc_examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.synonyms.getSynonymsSets(); +console.log(response); +---- diff --git a/docs/doc_examples/e2d8cf24a12053eb09fec7087cdab43a.asciidoc b/docs/doc_examples/e2d8cf24a12053eb09fec7087cdab43a.asciidoc new file mode 100644 index 000000000..3da189a80 --- /dev/null +++ b/docs/doc_examples/e2d8cf24a12053eb09fec7087cdab43a.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + percent_of_total_sales: { + normalize: { + buckets_path: "sales", + method: "percent_of_sum", + format: "00.00%", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc b/docs/doc_examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc new file mode 100644 index 000000000..12f8bfcc0 --- /dev/null +++ b/docs/doc_examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.updateApiKey({ + id: "VuaCfGcBCdbkQm-e5aOx", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc new file mode 100644 index 000000000..327abd471 --- /dev/null +++ b/docs/doc_examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.oidcPrepareAuthentication({ + realm: "oidc1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e308899a306e61d1a590868308689955.asciidoc b/docs/doc_examples/e308899a306e61d1a590868308689955.asciidoc new file mode 100644 index 
000000000..28a832c3a --- /dev/null +++ b/docs/doc_examples/e308899a306e61d1a590868308689955.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "ip_location", + description: "Add ip geolocation info", + processors: [ + { + ip_location: { + field: "ip", + target_field: "geo", + database_file: "GeoLite2-Country.mmdb", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "ip_location", + document: { + ip: "89.160.20.128", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/e30ea6e3823a139d7693d8cce1920a06.asciidoc b/docs/doc_examples/e30ea6e3823a139d7693d8cce1920a06.asciidoc index 6317ce7bb..28c54a237 100644 --- a/docs/doc_examples/e30ea6e3823a139d7693d8cce1920a06.asciidoc +++ b/docs/doc_examples/e30ea6e3823a139d7693d8cce1920a06.asciidoc @@ -4,18 +4,12 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - multi_match: { - query: 'this is a test', - fields: [ - 'subject^3', - 'message' - ] - } - } - } -}) -console.log(response) + query: { + multi_match: { + query: "this is a test", + fields: ["subject^3", "message"], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/e316271f668c9889bf548311fb421f1e.asciidoc b/docs/doc_examples/e316271f668c9889bf548311fb421f1e.asciidoc new file mode 100644 index 000000000..331097c1f --- /dev/null +++ b/docs/doc_examples/e316271f668c9889bf548311fb421f1e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + ip_addresses: { + terms: { + field: "destination_ip", + missing: "0.0.0.0", + value_type: "ip", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e317a8380dfbc76c4e7f23d0997b3518.asciidoc b/docs/doc_examples/e317a8380dfbc76c4e7f23d0997b3518.asciidoc new file mode 100644 index 000000000..13b20ac31 --- /dev/null +++ b/docs/doc_examples/e317a8380dfbc76c4e7f23d0997b3518.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "action.destructive_requires_name": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e324ea1547635180c31c1adf77870ba2.asciidoc b/docs/doc_examples/e324ea1547635180c31c1adf77870ba2.asciidoc new file mode 100644 index 000000000..6db19aaad --- /dev/null +++ b/docs/doc_examples/e324ea1547635180c31c1adf77870ba2.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "destination_template", + template: { + settings: { + index: { + number_of_replicas: 2, + number_of_shards: 2, + mode: "time_series", + routing_path: ["metricset"], + }, + }, + mappings: { + properties: { + "@timestamp": { + type: "date", + }, + metricset: { + type: "keyword", + time_series_dimension: true, + }, + k8s: { + properties: { + tx: { + type: 
"long", + }, + rx: { + type: "long", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc b/docs/doc_examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc new file mode 100644 index 000000000..812f46fca --- /dev/null +++ b/docs/doc_examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_internal/prevalidate_node_removal", + querystring: { + names: "node1,node2", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3678142aec988e2ff0ae5d934dc39e9.asciidoc b/docs/doc_examples/e3678142aec988e2ff0ae5d934dc39e9.asciidoc new file mode 100644 index 000000000..ccdf39576 --- /dev/null +++ b/docs/doc_examples/e3678142aec988e2ff0ae5d934dc39e9.asciidoc @@ -0,0 +1,102 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + location: { + type: "geo_point", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Geopoint as an object using GeoJSON format", + location: { + type: "Point", + coordinates: [-71.34, 41.12], + }, + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + text: "Geopoint as a WKT POINT primitive", + location: "POINT (-71.34 41.12)", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "my-index-000001", + id: 3, + document: { + text: "Geopoint as an object with 'lat' and 'lon' keys", + location: { + lat: 41.12, + lon: -71.34, + }, + }, +}); +console.log(response3); + +const response4 = await client.index({ + index: "my-index-000001", + id: 4, + document: { + text: "Geopoint as an array", + location: [-71.34, 41.12], + }, +}); +console.log(response4); + +const response5 = await client.index({ + index: "my-index-000001", + id: 5, + document: { + text: "Geopoint as a string", + location: "41.12,-71.34", + }, +}); +console.log(response5); + +const response6 = await client.index({ + index: "my-index-000001", + id: 6, + document: { + text: "Geopoint as a geohash", + location: "drm3btev3e86", + }, +}); +console.log(response6); + +const response7 = await client.search({ + index: "my-index-000001", + query: { + geo_bounding_box: { + location: { + top_left: { + lat: 42, + lon: -72, + }, + bottom_right: { + lat: 40, + lon: -74, + }, + }, + }, + }, +}); +console.log(response7); +---- diff --git a/docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc b/docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc new file mode 100644 index 000000000..da7018754 --- /dev/null +++ b/docs/doc_examples/e375c7da666276c4df6664c6821cd5f4.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-rank-vectors-float", + mappings: { + properties: { + my_vector: { + type: "rank_vectors", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-rank-vectors-float", + id: 
1, + document: { + my_vector: [ + [0.5, 10, 6], + [-0.5, 10, 10], + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e3a6462ca79c101314da0680c97678cd.asciidoc b/docs/doc_examples/e3a6462ca79c101314da0680c97678cd.asciidoc new file mode 100644 index 000000000..5634f8005 --- /dev/null +++ b/docs/doc_examples/e3a6462ca79c101314da0680c97678cd.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_all: {}, + }, + script_fields: { + test1: { + script: { + lang: "painless", + source: "doc['price'].value * 2", + }, + }, + test2: { + script: { + lang: "painless", + source: "doc['price'].value * params.factor", + params: { + factor: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3b3a8ae12ab947ad3ba96eb228402ca.asciidoc b/docs/doc_examples/e3b3a8ae12ab947ad3ba96eb228402ca.asciidoc new file mode 100644 index 000000000..4e8f1c68e --- /dev/null +++ b/docs/doc_examples/e3b3a8ae12ab947ad3ba96eb228402ca.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.store.preload": ["nvd", "dvd"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3f2f6ee3e312b8a90634827ae954d70.asciidoc b/docs/doc_examples/e3f2f6ee3e312b8a90634827ae954d70.asciidoc new file mode 100644 index 000000000..955bb8caf --- /dev/null +++ b/docs/doc_examples/e3f2f6ee3e312b8a90634827ae954d70.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "GeometryCollection", + geometries: [ + { + type: "Point", + coordinates: [100, 0], + }, + { + type: "LineString", + coordinates: [ + [101, 0], + [102, 1], + ], + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc b/docs/doc_examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc new file mode 100644 index 000000000..33c2f68ce --- /dev/null +++ b/docs/doc_examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + human: "true", + detailed: "true", + actions: "indices:data/write/search", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4193867485595c9c92f909a052d2a90.asciidoc b/docs/doc_examples/e4193867485595c9c92f909a052d2a90.asciidoc new file mode 100644 index 000000000..ab10339a7 --- /dev/null +++ b/docs/doc_examples/e4193867485595c9c92f909a052d2a90.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + "my-join-field": { + type: "join", + relations: { + parent: "child", + }, + }, + tag: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/e41a9bac42d0c1cb103674ae9039b7af.asciidoc b/docs/doc_examples/e41a9bac42d0c1cb103674ae9039b7af.asciidoc new file mode 100644 index 000000000..533bee458 --- /dev/null +++ b/docs/doc_examples/e41a9bac42d0c1cb103674ae9039b7af.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + numeric_detection: true, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + my_float: "1.0", + my_integer: "1", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e441cb3be3c2f007621ee1f8c9a2e0ef.asciidoc b/docs/doc_examples/e441cb3be3c2f007621ee1f8c9a2e0ef.asciidoc new file mode 100644 index 000000000..d8168a881 --- /dev/null +++ b/docs/doc_examples/e441cb3be3c2f007621ee1f8c9a2e0ef.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + statistics: { + matrix_stats: { + fields: ["poverty", "income"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e451900efbd8be50c2b8347a83816aa6.asciidoc b/docs/doc_examples/e451900efbd8be50c2b8347a83816aa6.asciidoc new file mode 100644 index 000000000..d4aa9c554 --- /dev/null +++ b/docs/doc_examples/e451900efbd8be50c2b8347a83816aa6.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + stats_monthly_sales: { + extended_stats_bucket: { + buckets_path: "sales_per_month>sales", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e46c83db1580e14be844079cd008f518.asciidoc b/docs/doc_examples/e46c83db1580e14be844079cd008f518.asciidoc new file mode 100644 index 000000000..9e529c789 --- /dev/null +++ b/docs/doc_examples/e46c83db1580e14be844079cd008f518.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + "routing.allocation.enable": "all", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e47a71a2e314dbbee5db8142a23957ce.asciidoc b/docs/doc_examples/e47a71a2e314dbbee5db8142a23957ce.asciidoc new file mode 100644 index 000000000..a75f8d567 --- /dev/null +++ b/docs/doc_examples/e47a71a2e314dbbee5db8142a23957ce.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "Index the ingest timestamp as 'event.ingested'", + field: "event.ingested", + value: "{{{_ingest.timestamp}}}", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc 
b/docs/doc_examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc new file mode 100644 index 000000000..41a817618 --- /dev/null +++ b/docs/doc_examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getOverallBuckets({ + job_id: "job-*", + overall_score: 80, + start: 1403532000000, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e494162e83ce041c56b2e2bc29d33474.asciidoc b/docs/doc_examples/e494162e83ce041c56b2e2bc29d33474.asciidoc new file mode 100644 index 000000000..c6ab0c547 --- /dev/null +++ b/docs/doc_examples/e494162e83ce041c56b2e2bc29d33474.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n sequence by process.pid with maxspan=1h\n [ process where process.name == "regsvr32.exe" ]\n [ file where stringContains(file.name, "scrobj.dll") ]\n until [ process where event.type == "termination" ]\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4b2b5e0aaedf3cbbcde3d61eb1f13fc.asciidoc b/docs/doc_examples/e4b2b5e0aaedf3cbbcde3d61eb1f13fc.asciidoc new file mode 100644 index 000000000..7b6bdf9f7 --- /dev/null +++ b/docs/doc_examples/e4b2b5e0aaedf3cbbcde3d61eb1f13fc.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "test", + id: 4, + refresh: "wait_for", + document: { + test: "test", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc new file mode 100644 index 000000000..42726dbd2 --- /dev/null +++ b/docs/doc_examples/e4b38973c74037335378d8480f1ce894.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.simulate.ingest({ + docs: [ + { + _index: "my-index", + _id: "123", + _source: { + foo: "foo", + }, + }, + { + _index: "my-index", + _id: "456", + _source: { + bar: "rab", + }, + }, + ], + component_template_substitutions: { + "my-mappings_template": { + template: { + mappings: { + dynamic: "strict", + properties: { + foo: { + type: "keyword", + }, + bar: { + type: "keyword", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc b/docs/doc_examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc new file mode 100644 index 000000000..75685cab4 --- /dev/null +++ b/docs/doc_examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc @@ -0,0 +1,51 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "data_log", + source: { + index: "kibana_sample_data_logs", + }, + dest: { + index: "data-logs-by-client", + }, + pivot: { + group_by: { + "machine.os": { + terms: { + field: "machine.os.keyword", + }, + }, + "machine.ip": { + terms: { + field: "clientip", + }, + }, + }, + aggregations: { + "time_frame.lte": { + 
max: { + field: "timestamp", + }, + }, + "time_frame.gte": { + min: { + field: "timestamp", + }, + }, + time_length: { + bucket_script: { + buckets_path: { + min: "time_frame.gte.value", + max: "time_frame.lte.value", + }, + script: "params.max - params.min", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc b/docs/doc_examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc new file mode 100644 index 000000000..bdbb31513 --- /dev/null +++ b/docs/doc_examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.promoteDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc b/docs/doc_examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc index bb1a7d6bb..3fdbb2aa8 100644 --- a/docs/doc_examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc +++ b/docs/doc_examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc @@ -4,15 +4,12 @@ [source, js] ---- const response = await client.indices.putMapping({ - index: 'publications', - body: { - properties: { - title: { - type: 'text' - } - } - } -}) -console.log(response) + index: "publications", + properties: { + title: { + type: "text", + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc b/docs/doc_examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc new file mode 100644 index 000000000..246793183 --- /dev/null +++ b/docs/doc_examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.tasks.list({ + human: "true", + detailed: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4de6035653e8202c43631f02d244661.asciidoc b/docs/doc_examples/e4de6035653e8202c43631f02d244661.asciidoc new file mode 100644 index 000000000..23f0dd981 --- /dev/null +++ b/docs/doc_examples/e4de6035653e8202c43631f02d244661.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "cluster_one:my-index-000001", + size: 1, + query: { + match: { + "user.id": "kimchy", + }, + }, + _source: ["user.id", "message", "http.response.status_code"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc b/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc new file mode 100644 index 000000000..7207d8204 --- /dev/null +++ b/docs/doc_examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.queryUser({ + query: { + prefix: { + roles: "other", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc b/docs/doc_examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc new file mode 100644 index 000000000..d09d2747f --- /dev/null +++ b/docs/doc_examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc @@ -0,0 +1,10 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e551ea38a2d8f8deac110b33304200cc.asciidoc b/docs/doc_examples/e551ea38a2d8f8deac110b33304200cc.asciidoc new file mode 100644 index 000000000..362c609dc --- /dev/null +++ b/docs/doc_examples/e551ea38a2d8f8deac110b33304200cc.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + multi_match: { + fields: ["title", "content"], + query: "the quick brown fox", + }, + }, + rescore: { + learning_to_rank: { + model_id: "ltr-model", + params: { + query_text: "the quick brown fox", + }, + }, + window_size: 100, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e567e6dbf86300142573c73789c8fce4.asciidoc b/docs/doc_examples/e567e6dbf86300142573c73789c8fce4.asciidoc deleted file mode 100644 index a59112030..000000000 --- a/docs/doc_examples/e567e6dbf86300142573c73789c8fce4.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'new_twitter', - size: '0', - filter_path: 'hits.total' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e586d1d2a997133e039fd352a42a72b3.asciidoc b/docs/doc_examples/e586d1d2a997133e039fd352a42a72b3.asciidoc new file mode 100644 index 000000000..9f0f29ea4 --- /dev/null +++ b/docs/doc_examples/e586d1d2a997133e039fd352a42a72b3.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "job-candidates", + query: { + terms_set: { + programming_languages: { + terms: ["c++", "java", "php"], + minimum_should_match_field: "required_matches", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e58833449d01379df20ad06dc28144d8.asciidoc b/docs/doc_examples/e58833449d01379df20ad06dc28144d8.asciidoc new file mode 100644 index 000000000..93af1e480 --- /dev/null +++ b/docs/doc_examples/e58833449d01379df20ad06dc28144d8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.updateByQuery({ + index: "my-index-000001", + conflicts: "proceed", + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e58b7965c3a314c34bc444c6db3b1b79.asciidoc b/docs/doc_examples/e58b7965c3a314c34bc444c6db3b1b79.asciidoc new file mode 100644 index 000000000..ded25932b --- /dev/null +++ b/docs/doc_examples/e58b7965c3a314c34bc444c6db3b1b79.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + name: "index.routing.allocation.enable", + flat_settings: "true", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc b/docs/doc_examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc new file mode 100644 index 000000000..15f694447 --- /dev/null +++ b/docs/doc_examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "indices.recovery.max_bytes_per_sec": "50mb", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e5c710b08a545522d50b4ce35503bc46.asciidoc b/docs/doc_examples/e5c710b08a545522d50b4ce35503bc46.asciidoc new file mode 100644 index 000000000..fa2981ef5 --- /dev/null +++ b/docs/doc_examples/e5c710b08a545522d50b4ce35503bc46.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-data-stream", + pipeline: "my-pipeline", + document: { + "@timestamp": "2099-03-07T11:04:05.000Z", + "my-keyword-field": "foo", + }, +}); +console.log(response); + +const response1 = await client.bulk({ + index: "my-data-stream", + pipeline: "my-pipeline", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2099-03-07T11:04:06.000Z", + "my-keyword-field": "foo", + }, + { + create: {}, + }, + { + "@timestamp": "2099-03-07T11:04:07.000Z", + "my-keyword-field": "bar", + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e5d2172b524332196cac0f031c043659.asciidoc b/docs/doc_examples/e5d2172b524332196cac0f031c043659.asciidoc deleted file mode 100644 index eae6cccc9..000000000 --- a/docs/doc_examples/e5d2172b524332196cac0f031c043659.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'twitter', - body: { - settings: { - index: { - number_of_shards: 3, - number_of_replicas: 2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e5f50b31f165462d883ecbff45f74985.asciidoc b/docs/doc_examples/e5f50b31f165462d883ecbff45f74985.asciidoc index 1fa83ccb6..cf38e87ae 100644 --- a/docs/doc_examples/e5f50b31f165462d883ecbff45f74985.asciidoc +++ b/docs/doc_examples/e5f50b31f165462d883ecbff45f74985.asciidoc @@ -4,31 +4,25 @@ [source, js] ---- const response = await client.indices.putTemplate({ - name: 'template_1', - body: { - index_patterns: [ - 'te*', - 'bar*' - ], - settings: { - number_of_shards: 1 + name: "template_1", + index_patterns: ["te*", "bar*"], + settings: { + number_of_shards: 1, + }, + mappings: { + _source: { + enabled: false, }, - mappings: { - _source: { - enabled: false + properties: { + host_name: { + type: "keyword", }, - properties: { - host_name: { - type: 'keyword' - }, - created_at: { - type: 'date', - format: 'EEE MMM dd HH:mm:ss Z yyyy' - } - } - } - } -}) -console.log(response) + created_at: { + type: "date", + format: "EEE MMM dd HH:mm:ss Z yyyy", + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc b/docs/doc_examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc new file mode 100644 index 000000000..74dcb596e --- /dev/null +++ b/docs/doc_examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT 
EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-index-000001", + id: 0, + _source_includes: "*.id", + _source_excludes: "entities", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e5f8f83df37ab2296dc4bfed95d7aba7.asciidoc b/docs/doc_examples/e5f8f83df37ab2296dc4bfed95d7aba7.asciidoc new file mode 100644 index 000000000..48076f9c2 --- /dev/null +++ b/docs/doc_examples/e5f8f83df37ab2296dc4bfed95d7aba7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.enable": "all", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e608cd0c034f6c245ea87f425e09ce2f.asciidoc b/docs/doc_examples/e608cd0c034f6c245ea87f425e09ce2f.asciidoc new file mode 100644 index 000000000..08f05042e --- /dev/null +++ b/docs/doc_examples/e608cd0c034f6c245ea87f425e09ce2f.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc b/docs/doc_examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc new file mode 100644 index 000000000..e6c658b67 --- /dev/null +++ b/docs/doc_examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "mapping3", + roles: ["ldap-user"], + enabled: true, + rules: { + field: { + "realm.name": "ldap1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e60c2bf89fdf38187709d04dd1c55330.asciidoc b/docs/doc_examples/e60c2bf89fdf38187709d04dd1c55330.asciidoc new file mode 100644 index 000000000..2ff61b02f --- /dev/null +++ b/docs/doc_examples/e60c2bf89fdf38187709d04dd1c55330.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + more_like_this: { + fields: ["title", "description"], + like: "Once upon a time", + min_term_freq: 1, + max_query_terms: 12, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e60ded7becfd5b2ccaef5bad2aaa93f5.asciidoc b/docs/doc_examples/e60ded7becfd5b2ccaef5bad2aaa93f5.asciidoc new file mode 100644 index 000000000..eb8c44fbb --- /dev/null +++ b/docs/doc_examples/e60ded7becfd5b2ccaef5bad2aaa93f5.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + products: { + terms: { + field: "product", + size: 5, + show_term_doc_count_error: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc b/docs/doc_examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc new file mode 100644 index 000000000..272176613 --- /dev/null +++ 
b/docs/doc_examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putCalendar({ + calendar_id: "planned-outages", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6369e7cef82d881af593d5526bf79bd.asciidoc b/docs/doc_examples/e6369e7cef82d881af593d5526bf79bd.asciidoc new file mode 100644 index 000000000..38d057377 --- /dev/null +++ b/docs/doc_examples/e6369e7cef82d881af593d5526bf79bd.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_term: { + "user.id": { + value: "kimchy", + boost: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc b/docs/doc_examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc new file mode 100644 index 000000000..27156eddb --- /dev/null +++ b/docs/doc_examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.health({ + index: "my-index-000001", + level: "shards", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e63cf08350e9381f519c2835843be7cd.asciidoc b/docs/doc_examples/e63cf08350e9381f519c2835843be7cd.asciidoc new file mode 100644 index 000000000..492ee50b1 --- /dev/null +++ b/docs/doc_examples/e63cf08350e9381f519c2835843be7cd.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_date_formats: ["yyyy/MM||MM/dd/yyyy"], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + create_date: "09/25/2015", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/e642be44a62a89cf4afb2db28220c9a9.asciidoc b/docs/doc_examples/e642be44a62a89cf4afb2db28220c9a9.asciidoc new file mode 100644 index 000000000..3bdd9984b --- /dev/null +++ b/docs/doc_examples/e642be44a62a89cf4afb2db28220c9a9.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "ingest.geoip.downloader.enabled": true, + "indices.lifecycle.history_index_enabled": true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc b/docs/doc_examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc new file mode 100644 index 000000000..33c7ef397 --- /dev/null +++ b/docs/doc_examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + version_type: "external", + }, + script: { + source: + "if (ctx._source.foo == 'bar') {ctx._version++; 
ctx._source.remove('foo')}", + lang: "painless", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e697ef947f3fb7835f7fadb9125b1043.asciidoc b/docs/doc_examples/e697ef947f3fb7835f7fadb9125b1043.asciidoc new file mode 100644 index 000000000..aee888618 --- /dev/null +++ b/docs/doc_examples/e697ef947f3fb7835f7fadb9125b1043.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library ORDER BY page_count DESC", + filter: { + range: { + page_count: { + gte: 100, + lte: 200, + }, + }, + }, + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6b972611c0ec8ab4c240f33f323d85b.asciidoc b/docs/doc_examples/e6b972611c0ec8ab4c240f33f323d85b.asciidoc new file mode 100644 index 000000000..0e09b23e2 --- /dev/null +++ b/docs/doc_examples/e6b972611c0ec8ab4c240f33f323d85b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + by_day: { + date_histogram: { + field: "date", + calendar_interval: "day", + time_zone: "-01:00", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc b/docs/doc_examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc new file mode 100644 index 000000000..0a8f5ebdc --- /dev/null +++ b/docs/doc_examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias(); +console.log(response); +---- diff --git a/docs/doc_examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc b/docs/doc_examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc new file mode 100644 index 000000000..673ac434a --- /dev/null +++ b/docs/doc_examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.refresh({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6e47da87079a8b67f767a2a01878cf2.asciidoc b/docs/doc_examples/e6e47da87079a8b67f767a2a01878cf2.asciidoc new file mode 100644 index 000000000..4868fe215 --- /dev/null +++ b/docs/doc_examples/e6e47da87079a8b67f767a2a01878cf2.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + set: { + description: "Use geo_point dynamic template for address field", + field: "_dynamic_templates", + value: { + address: "geo_point", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc b/docs/doc_examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc new file mode 100644 index 000000000..a0bffb528 --- /dev/null +++ b/docs/doc_examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "retrievers_example", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + query_string: { + query: "(information retrieval) OR (artificial intelligence)", + default_field: "text", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [0.23, 0.67, 0.89], + k: 3, + num_candidates: 5, + }, + }, + ], + rank_window_size: 10, + rank_constant: 1, + }, + }, + collapse: { + field: "year", + inner_hits: { + name: "topic related documents", + _source: ["year"], + }, + }, + _source: false, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc b/docs/doc_examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc new file mode 100644 index 000000000..5099b0b3e --- /dev/null +++ b/docs/doc_examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + highlight: { + fields: [ + { + title: {}, + }, + { + text: {}, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc b/docs/doc_examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc new file mode 100644 index 000000000..1b37d265b --- /dev/null +++ b/docs/doc_examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.migration.deprecations({ + index: ".ml-anomalies-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc b/docs/doc_examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc new file mode 100644 index 000000000..3dd4b3ef8 --- /dev/null +++ b/docs/doc_examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.executeRetention(); +console.log(response); +---- diff --git a/docs/doc_examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc b/docs/doc_examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc new file mode 100644 index 000000000..76351698d --- /dev/null +++ b/docs/doc_examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information", + processors: [ + { + attachment: { + field: "data", + properties: ["content", "title"], + remove_binary: true, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/e784fc00894635470adfd78a0c46b427.asciidoc b/docs/doc_examples/e784fc00894635470adfd78a0c46b427.asciidoc new file mode 100644 index 000000000..6572b4a1f --- /dev/null +++ b/docs/doc_examples/e784fc00894635470adfd78a0c46b427.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putComponentTemplate({ + name: "template_1", + template: { + 
settings: { + number_of_shards: 1, + }, + mappings: { + _source: { + enabled: false, + }, + properties: { + host_name: { + type: "keyword", + }, + created_at: { + type: "date", + format: "EEE MMM dd HH:mm:ss Z yyyy", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e7cfe670b4177d1011076f845ec2916c.asciidoc b/docs/doc_examples/e7cfe670b4177d1011076f845ec2916c.asciidoc new file mode 100644 index 000000000..760160b38 --- /dev/null +++ b/docs/doc_examples/e7cfe670b4177d1011076f845ec2916c.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "data_streams.lifecycle.retention.default": "7d", + "data_streams.lifecycle.retention.max": "90d", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e7d819634d765cde269e2669e2dc677f.asciidoc b/docs/doc_examples/e7d819634d765cde269e2669e2dc677f.asciidoc new file mode 100644 index 000000000..8dbb9e88a --- /dev/null +++ b/docs/doc_examples/e7d819634d765cde269e2669e2dc677f.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + username: "myuser", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e7e95022867c72a6563137f066dd2973.asciidoc b/docs/doc_examples/e7e95022867c72a6563137f066dd2973.asciidoc new file mode 100644 index 000000000..3ff6b6cac --- /dev/null +++ b/docs/doc_examples/e7e95022867c72a6563137f066dd2973.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + hotspots: { + geohash_grid: { + field: "location", + precision: 5, + }, + aggs: { + significant_crime_types: { + significant_terms: { + field: "crime_type", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e7eca57a5bf5a53cbbe2463bce11495b.asciidoc b/docs/doc_examples/e7eca57a5bf5a53cbbe2463bce11495b.asciidoc new file mode 100644 index 000000000..9ce8d6c4b --- /dev/null +++ b/docs/doc_examples/e7eca57a5bf5a53cbbe2463bce11495b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + types_count: { + value_count: { + field: "type", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc b/docs/doc_examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc new file mode 100644 index 000000000..eb43ead35 --- /dev/null +++ b/docs/doc_examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.putDataFrameAnalytics({ + id: "house_price_regression_analysis", + source: { + index: "houses_sold_last_10_yrs", + }, + dest: { + index: "house_price_predictions", + }, + analysis: { + regression: { + dependent_variable: "price", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e821d27a8b810821707ba860e31f8b78.asciidoc 
b/docs/doc_examples/e821d27a8b810821707ba860e31f8b78.asciidoc new file mode 100644 index 000000000..941d4af41 --- /dev/null +++ b/docs/doc_examples/e821d27a8b810821707ba860e31f8b78.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + properties: { + city: { + type: "text", + fields: { + raw: { + type: "keyword", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc b/docs/doc_examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc new file mode 100644 index 000000000..b4e52ceb7 --- /dev/null +++ b/docs/doc_examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.getWatch({ + id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e82c33def91faddcfeed7b02cd258605.asciidoc b/docs/doc_examples/e82c33def91faddcfeed7b02cd258605.asciidoc new file mode 100644 index 000000000..610783b30 --- /dev/null +++ b/docs/doc_examples/e82c33def91faddcfeed7b02cd258605.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + aggs: { + genres_and_products: { + multi_terms: { + terms: [ + { + field: "genre", + }, + { + field: "product", + missing: "Product Z", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc b/docs/doc_examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc new file mode 100644 index 000000000..219d478d6 --- /dev/null +++ b/docs/doc_examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "test", + aliases: { + alias_1: {}, + alias_2: { + filter: { + term: { + "user.id": "kimchy", + }, + }, + routing: "shard-1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e88a057a13e191e4d5faa22edf2ae8ed.asciidoc b/docs/doc_examples/e88a057a13e191e4d5faa22edf2ae8ed.asciidoc new file mode 100644 index 000000000..b5fa22aa5 --- /dev/null +++ b/docs/doc_examples/e88a057a13e191e4d5faa22edf2ae8ed.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + filter_path: "**.xpack.profiling.templates.enabled", + include_defaults: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e891e1d4805172da45a81f62b6b44aca.asciidoc b/docs/doc_examples/e891e1d4805172da45a81f62b6b44aca.asciidoc new file mode 100644 index 000000000..d35dec2fa --- /dev/null +++ b/docs/doc_examples/e891e1d4805172da45a81f62b6b44aca.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + size: 0, + runtime_mappings: { + normalized_genre: { + 
type: "keyword", + script: + "\n String genre = doc['genre'].value;\n if (doc['product'].value.startsWith('Anthology')) {\n emit(genre + ' anthology');\n } else {\n emit(genre);\n }\n ", + }, + }, + aggs: { + genres: { + terms: { + field: "normalized_genre", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e89bf0d893b7bf43c2d9b44db6cfe21b.asciidoc b/docs/doc_examples/e89bf0d893b7bf43c2d9b44db6cfe21b.asciidoc new file mode 100644 index 000000000..fac95eaa4 --- /dev/null +++ b/docs/doc_examples/e89bf0d893b7bf43c2d9b44db6cfe21b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + rank_feature: { + field: "pagerank", + log: { + scaling_factor: 4, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8a2726eea5545355d1d0835d4599f55.asciidoc b/docs/doc_examples/e8a2726eea5545355d1d0835d4599f55.asciidoc new file mode 100644 index 000000000..19dd77247 --- /dev/null +++ b/docs/doc_examples/e8a2726eea5545355d1d0835d4599f55.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + term: { + ip_addr: "2001:db8::/48", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8bb5c57bdeff22be8e5f39a99dfe70e.asciidoc b/docs/doc_examples/e8bb5c57bdeff22be8e5f39a99dfe70e.asciidoc new file mode 100644 index 000000000..0cebca89a --- /dev/null +++ b/docs/doc_examples/e8bb5c57bdeff22be8e5f39a99dfe70e.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "stackoverflow", + size: 0, + query: { + query_string: { + query: "tags:kibana OR tags:javascript", + }, + }, + aggs: { + sample: { + sampler: { + shard_size: 200, + }, + aggs: { + keywords: { + significant_terms: { + field: "tags", + exclude: ["kibana", "javascript"], + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc b/docs/doc_examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc new file mode 100644 index 000000000..50aa4b1e8 --- /dev/null +++ b/docs/doc_examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchShards({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8cbe2269f3dff6b231e73119e81511d.asciidoc b/docs/doc_examples/e8cbe2269f3dff6b231e73119e81511d.asciidoc new file mode 100644 index 000000000..9bb62d070 --- /dev/null +++ b/docs/doc_examples/e8cbe2269f3dff6b231e73119e81511d.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + exists: { + field: "user", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8e451bc8c45bcf16df43804c4fc8329.asciidoc b/docs/doc_examples/e8e451bc8c45bcf16df43804c4fc8329.asciidoc deleted file mode 
100644 index 84fe964c2..000000000 --- a/docs/doc_examples/e8e451bc8c45bcf16df43804c4fc8329.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - track_scores: true, - sort: [ - { - post_date: { - order: 'desc' - } - }, - { - name: 'desc' - }, - { - age: 'desc' - } - ], - query: { - term: { - user: 'kimchy' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc b/docs/doc_examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc new file mode 100644 index 000000000..271527227 --- /dev/null +++ b/docs/doc_examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + simple_query_string: { + query: "Will Smith", + fields: ["title", "*_name"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc b/docs/doc_examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc new file mode 100644 index 000000000..0ec3637d4 --- /dev/null +++ b/docs/doc_examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getCategories({ + job_id: "esxi_log", + page: { + size: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e905543b281e9c41395304da76ed2ea3.asciidoc b/docs/doc_examples/e905543b281e9c41395304da76ed2ea3.asciidoc new file mode 100644 index 000000000..188e1d435 --- /dev/null +++ b/docs/doc_examples/e905543b281e9c41395304da76ed2ea3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: ".watches", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e930a572e8ddfdecc13498c04007b9e3.asciidoc b/docs/doc_examples/e930a572e8ddfdecc13498c04007b9e3.asciidoc new file mode 100644 index 000000000..6b81643ec --- /dev/null +++ b/docs/doc_examples/e930a572e8ddfdecc13498c04007b9e3.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "openai-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1536, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc b/docs/doc_examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc new file mode 100644 index 000000000..0e82192f7 --- /dev/null +++ b/docs/doc_examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + "user.id": "kimchy", + }, + }, + highlight: { + pre_tags: ["<tag1>"], + post_tags: ["</tag1>"], + fields: { + body: {}, + 
}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e95ba581b298cd7bb598374afbfed315.asciidoc b/docs/doc_examples/e95ba581b298cd7bb598374afbfed315.asciidoc new file mode 100644 index 000000000..99b9c40c9 --- /dev/null +++ b/docs/doc_examples/e95ba581b298cd7bb598374afbfed315.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.asyncSearch.get({ + id: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e95e61988dc3073a007f7b7445dd233b.asciidoc b/docs/doc_examples/e95e61988dc3073a007f7b7445dd233b.asciidoc new file mode 100644 index 000000000..c9aaddee0 --- /dev/null +++ b/docs/doc_examples/e95e61988dc3073a007f7b7445dd233b.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "dsl-data-stream-template", + index_patterns: ["dsl-data-stream*"], + data_stream: {}, + priority: 500, + template: { + settings: { + "index.lifecycle.name": "pre-dsl-ilm-policy", + "index.lifecycle.prefer_ilm": false, + }, + lifecycle: { + data_retention: "7d", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9625da419bff6470ffd9927c59ca159.asciidoc b/docs/doc_examples/e9625da419bff6470ffd9927c59ca159.asciidoc new file mode 100644 index 000000000..d3ff499f4 --- /dev/null +++ b/docs/doc_examples/e9625da419bff6470ffd9927c59ca159.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.threadPool({ + v: "true", + h: "id,name,queue,active,rejected,completed", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9738fe09a99080506a07945795e8eda.asciidoc b/docs/doc_examples/e9738fe09a99080506a07945795e8eda.asciidoc new file mode 100644 index 000000000..9b7a8182a --- /dev/null +++ b/docs/doc_examples/e9738fe09a99080506a07945795e8eda.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["stop"], + text: "a quick fox jumps over the lazy dog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/e99c45a47dc0ba7440aea8a9a99c84fa.asciidoc b/docs/doc_examples/e99c45a47dc0ba7440aea8a9a99c84fa.asciidoc new file mode 100644 index 000000000..1f5b22c33 --- /dev/null +++ b/docs/doc_examples/e99c45a47dc0ba7440aea8a9a99c84fa.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "news", + query: { + match: { + content: "Bird flu", + }, + }, + aggregations: { + my_sample: { + sampler: { + shard_size: 100, + }, + aggregations: { + keywords: { + significant_text: { + field: "content", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9a0b450af6219772631703d602c7092.asciidoc b/docs/doc_examples/e9a0b450af6219772631703d602c7092.asciidoc new file mode 100644 index 000000000..4073cd41b --- /dev/null +++ b/docs/doc_examples/e9a0b450af6219772631703d602c7092.asciidoc @@ -0,0 +1,41 @@ 
+// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + text_expansion: { + "ml.tokens": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: false, + }, + }, + }, + }, + rescore: { + window_size: 100, + query: { + rescore_query: { + text_expansion: { + "ml.tokens": { + model_id: ".elser_model_2", + model_text: "How is the weather in Jamaica?", + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: true, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9c2e15b36372d5281c879d336322b6c.asciidoc b/docs/doc_examples/e9c2e15b36372d5281c879d336322b6c.asciidoc deleted file mode 100644 index 5c3a8c543..000000000 --- a/docs/doc_examples/e9c2e15b36372d5281c879d336322b6c.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.reindex({ - body: { - source: { - index: 'twitter', - _source: [ - 'user', - '_doc' - ] - }, - dest: { - index: 'new_twitter' - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/e9f9e184499a793828233e536fac0487.asciidoc b/docs/doc_examples/e9f9e184499a793828233e536fac0487.asciidoc new file mode 100644 index 000000000..a8dbb9fcf --- /dev/null +++ b/docs/doc_examples/e9f9e184499a793828233e536fac0487.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteByQuery({ + index: "my-index-000001", + scroll_size: 5000, + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc b/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc new file mode 100644 index 000000000..7ec14029d --- /dev/null +++ b/docs/doc_examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "completion", + inference_id: "google_ai_studio_completion", + inference_config: { + service: "googleaistudio", + service_settings: { + api_key: "<api_key>", + model_id: "<model_id>", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/e9fe3b53b5b6e1ff9566b5237c0fa513.asciidoc b/docs/doc_examples/e9fe3b53b5b6e1ff9566b5237c0fa513.asciidoc new file mode 100644 index 000000000..62c0a1363 --- /dev/null +++ b/docs/doc_examples/e9fe3b53b5b6e1ff9566b5237c0fa513.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "child_example", + id: 2, + routing: 1, + document: { + join: { + name: "answer", + parent: "1", + }, + owner: { + location: "Norfolk, United Kingdom", + display_name: "Sam", + id: 48, + }, + body: "Unfortunately you're pretty much limited to FTP...", + creation_date: "2009-05-04T13:45:37.030", + }, +});
+console.log(response); + +const response1 = await client.index({ + index: "child_example", + id: 3, + routing: 1, + refresh: "true", + document: { + join: { + name: "answer", + parent: "1", + }, + owner: { + location: "Norfolk, United Kingdom", + display_name: "Troll", + id: 49, + }, + body: "Use Linux...", + creation_date: "2009-05-05T13:45:37.030", + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/ea020ea32d5cd35e577c61a120f92451.asciidoc b/docs/doc_examples/ea020ea32d5cd35e577c61a120f92451.asciidoc new file mode 100644 index 000000000..bfd1e2c92 --- /dev/null +++ b/docs/doc_examples/ea020ea32d5cd35e577c61a120f92451.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-data-stream", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:21:15.000Z", + message: + '192.0.2.42 - - [06/May/2099:16:21:15 +0000] "GET /images/bg.jpg HTTP/1.0" 200 24736', + }, + { + create: {}, + }, + { + "@timestamp": "2099-05-06T16:25:42.000Z", + message: + '192.0.2.255 - - [06/May/2099:16:25:42 +0000] "GET /favicon.ico HTTP/1.0" 200 3638', + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-data-stream", + document: { + "@timestamp": "2099-05-06T16:21:15.000Z", + message: + '192.0.2.42 - - [06/May/2099:16:21:15 +0000] "GET /images/bg.jpg HTTP/1.0" 200 24736', + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/ea29029884a5fd9a8d8830d25884bf07.asciidoc b/docs/doc_examples/ea29029884a5fd9a8d8830d25884bf07.asciidoc new file mode 100644 index 000000000..b00988659 --- /dev/null +++ b/docs/doc_examples/ea29029884a5fd9a8d8830d25884bf07.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + parent_id: { + type: "my-child", + id: "1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea313059c18d6edbd28c3f743a5e7c1c.asciidoc b/docs/doc_examples/ea313059c18d6edbd28c3f743a5e7c1c.asciidoc new file mode 100644 index 000000000..d7a847278 --- /dev/null +++ b/docs/doc_examples/ea313059c18d6edbd28c3f743a5e7c1c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + city: "madrid", + }, + }, + aggs: { + tags: { + significant_terms: { + field: "tag", + background_filter: { + term: { + text: "spain", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea5391267ced860c00214c096e08c8d4.asciidoc b/docs/doc_examples/ea5391267ced860c00214c096e08c8d4.asciidoc new file mode 100644 index 000000000..fda39b10d --- /dev/null +++ b/docs/doc_examples/ea5391267ced860c00214c096e08c8d4.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + number_of_replicas: 2, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea5b4d2d87fd4e040afad18903c44869.asciidoc 
b/docs/doc_examples/ea5b4d2d87fd4e040afad18903c44869.asciidoc new file mode 100644 index 000000000..e7e8a4292 --- /dev/null +++ b/docs/doc_examples/ea5b4d2d87fd4e040afad18903c44869.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + top_left: { + lat: 40.73, + lon: -74.1, + }, + bottom_right: { + lat: 40.01, + lon: -71.12, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea61aa2531ea73ccc0acd2d41f0518eb.asciidoc b/docs/doc_examples/ea61aa2531ea73ccc0acd2d41f0518eb.asciidoc new file mode 100644 index 000000000..83955e4ec --- /dev/null +++ b/docs/doc_examples/ea61aa2531ea73ccc0acd2d41f0518eb.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + pagerank: { + type: "rank_feature", + }, + url_length: { + type: "rank_feature", + positive_score_impact: false, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + pagerank: 8, + url_length: 22, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + rank_feature: { + field: "pagerank", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ea66a620c23337545e409c120c4ed5d9.asciidoc b/docs/doc_examples/ea66a620c23337545e409c120c4ed5d9.asciidoc new file mode 100644 index 000000000..115c615f8 --- /dev/null +++ b/docs/doc_examples/ea66a620c23337545e409c120c4ed5d9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.explainLifecycle({ + index: ".ds-timeseries-*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea68e3428cc2ca3455bf312d09451489.asciidoc b/docs/doc_examples/ea68e3428cc2ca3455bf312d09451489.asciidoc new file mode 100644 index 000000000..aa41e2ba4 --- /dev/null +++ b/docs/doc_examples/ea68e3428cc2ca3455bf312d09451489.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "product-index", + mappings: { + properties: { + "product-vector": { + type: "dense_vector", + dims: 5, + index: false, + }, + price: { + type: "long", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea690283f301c6ce957efad93d7d5c5d.asciidoc b/docs/doc_examples/ea690283f301c6ce957efad93d7d5c5d.asciidoc new file mode 100644 index 000000000..115a9adf0 --- /dev/null +++ b/docs/doc_examples/ea690283f301c6ce957efad93d7d5c5d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "length_example", + settings: { + analysis: { + analyzer: { + standard_length: { + tokenizer: "standard", + filter: ["length"], + }, + }, + }, + 
}, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ea8c4229afa6dd4f1321355542be9912.asciidoc b/docs/doc_examples/ea8c4229afa6dd4f1321355542be9912.asciidoc new file mode 100644 index 000000000..c4744fb4e --- /dev/null +++ b/docs/doc_examples/ea8c4229afa6dd4f1321355542be9912.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "attachment", + description: "Extract attachment information", + processors: [ + { + attachment: { + field: "data", + indexed_chars: 11, + indexed_chars_field: "max_size", + remove_binary: true, + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "attachment", + document: { + data: "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ea92390651e8ecad0c890658985343c5.asciidoc b/docs/doc_examples/ea92390651e8ecad0c890658985343c5.asciidoc new file mode 100644 index 000000000..e71ac87a0 --- /dev/null +++ b/docs/doc_examples/ea92390651e8ecad0c890658985343c5.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "hourly-snapshots", + name: "<hourly-snap-{now/d}>", + schedule: "0 0 * * * ?", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + }, + retention: { + expire_after: "1d", + min_count: 1, + max_count: 24, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eab3cad0257c539c5efd2689aa52f242.asciidoc b/docs/doc_examples/eab3cad0257c539c5efd2689aa52f242.asciidoc new file mode 100644 index 000000000..32089e3ef --- /dev/null +++ b/docs/doc_examples/eab3cad0257c539c5efd2689aa52f242.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.dataStreamsStats({ + name: "my-data-stream", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc b/docs/doc_examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc new file mode 100644 index 000000000..f5decb0b8 --- /dev/null +++ b/docs/doc_examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc @@ -0,0 +1,61 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match: { + comment: { + query: "foo bar", + }, + }, + }, + rescore: { + window_size: 50, + query: { + rescore_query: { + match_phrase: { + comment: { + query: "foo bar", + slop: 1, + }, + }, + }, + rescore_query_weight: 10, + }, + }, + _source: false, + highlight: { + order: "score", + fields: { + comment: { + fragment_size: 150, + number_of_fragments: 3, + highlight_query: { + bool: { + must: { + match: { + comment: { + query: "foo bar", + }, + }, + }, + should: { + match_phrase: { + comment: { + query: "foo bar", + slop: 1, + boost: 10, + }, + }, + }, + minimum_should_match: 0, + }, + }, + }, + }, + }, +}); +console.log(response); +----
diff --git a/docs/doc_examples/ead4d875877d618594d0cdbdd9b7998b.asciidoc b/docs/doc_examples/ead4d875877d618594d0cdbdd9b7998b.asciidoc new file mode 100644 index 000000000..2a73cee7d --- /dev/null +++ b/docs/doc_examples/ead4d875877d618594d0cdbdd9b7998b.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.deleteVotingConfigExclusions(); +console.log(response); + +const response1 = await client.cluster.deleteVotingConfigExclusions({ + wait_for_removal: "false", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/eada8af6588584ac88f1e5b15f4a5c2a.asciidoc b/docs/doc_examples/eada8af6588584ac88f1e5b15f4a5c2a.asciidoc new file mode 100644 index 000000000..bc3b41783 --- /dev/null +++ b/docs/doc_examples/eada8af6588584ac88f1e5b15f4a5c2a.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "metrics_index", + id: 1, + document: { + "network.name": "net-1", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [3, 7, 23, 12, 6], + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "metrics_index", + id: 2, + document: { + "network.name": "net-2", + latency_histo: { + values: [0.1, 0.2, 0.3, 0.4, 0.5], + counts: [8, 17, 8, 7, 6], + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "metrics_index", + size: 0, + aggs: { + total_requests: { + value_count: { + field: "latency_histo", + }, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/eae8931d01b3b878dd0c45214121e662.asciidoc b/docs/doc_examples/eae8931d01b3b878dd0c45214121e662.asciidoc new file mode 100644 index 000000000..f36471ccb --- /dev/null +++ b/docs/doc_examples/eae8931d01b3b878dd0c45214121e662.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + geo_bounding_box: { + "pin.location": { + top_left: "dr", + bottom_right: "dr", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eaf53b05959cc6b7fb09579baf34de68.asciidoc b/docs/doc_examples/eaf53b05959cc6b7fb09579baf34de68.asciidoc new file mode 100644 index 000000000..400427410 --- /dev/null +++ b/docs/doc_examples/eaf53b05959cc6b7fb09579baf34de68.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sale_type: { + terms: { + field: "type", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + }, + }, + hat_vs_bag_ratio: { + bucket_script: { + buckets_path: { + hats: "sale_type['hat']>sales", + bags: "sale_type['bag']>sales", + }, + script: "params.hats / params.bags", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eaf6a846ded090fd6ac48269ad2b328b.asciidoc b/docs/doc_examples/eaf6a846ded090fd6ac48269ad2b328b.asciidoc new file mode 100644 index 000000000..8f88b4f8d --- /dev/null +++ 
b/docs/doc_examples/eaf6a846ded090fd6ac48269ad2b328b.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + "index.lifecycle.name": "my_policy", + "index.lifecycle.rollover_alias": "my_data", + }, + aliases: { + my_data: { + is_write_index: true, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eafdabe80b21b90495555fa6d9089412.asciidoc b/docs/doc_examples/eafdabe80b21b90495555fa6d9089412.asciidoc new file mode 100644 index 000000000..fd4633709 --- /dev/null +++ b/docs/doc_examples/eafdabe80b21b90495555fa6d9089412.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedServiceTokens({ + namespace: "elastic", + service: "fleet-server", + name: "token1,token2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb09235533a1c65a0627ba05f7d4ad4d.asciidoc b/docs/doc_examples/eb09235533a1c65a0627ba05f7d4ad4d.asciidoc new file mode 100644 index 000000000..56ae5af9d --- /dev/null +++ b/docs/doc_examples/eb09235533a1c65a0627ba05f7d4ad4d.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "place", + id: 1, + document: { + suggest: { + input: "timmy's", + contexts: { + location: [ + { + lat: 43.6624803, + lon: -79.3863353, + }, + { + lat: 43.6624718, + lon: -79.3873227, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb141f8df8ead40ff7440b623ea92267.asciidoc b/docs/doc_examples/eb141f8df8ead40ff7440b623ea92267.asciidoc new file mode 100644 index 000000000..55f84f169 --- /dev/null +++ b/docs/doc_examples/eb141f8df8ead40ff7440b623ea92267.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "good_example_index", + mappings: { + properties: { + field_1: { + type: "text", + copy_to: ["field_2", "field_3"], + }, + field_2: { + type: "text", + }, + field_3: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc b/docs/doc_examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc new file mode 100644 index 000000000..fea94f9a6 --- /dev/null +++ b/docs/doc_examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "test", + id: 1, + script: "ctx._source.new_field = 'value_of_new_field'", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb30ba547e4a7b8f54f33ab259aca523.asciidoc b/docs/doc_examples/eb30ba547e4a7b8f54f33ab259aca523.asciidoc deleted file mode 100644 index 08353b44f..000000000 --- a/docs/doc_examples/eb30ba547e4a7b8f54f33ab259aca523.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- 
-const response = await client.update({ - index: 'test', - id: '1', - body: { - script: "ctx._source.new_field = 'value_of_new_field'" - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/eb33a7e5a0fe83fdaa0f79354f659428.asciidoc b/docs/doc_examples/eb33a7e5a0fe83fdaa0f79354f659428.asciidoc new file mode 100644 index 000000000..954f2ae31 --- /dev/null +++ b/docs/doc_examples/eb33a7e5a0fe83fdaa0f79354f659428.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index-000001", + runtime: { + client_ip: { + type: "ip", + script: { + source: + 'String m = doc["message"].value; int end = m.indexOf(" "); emit(m.substring(0, end));', + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb4e43b47867b54214a8630172dd0e21.asciidoc b/docs/doc_examples/eb4e43b47867b54214a8630172dd0e21.asciidoc new file mode 100644 index 000000000..3caf88096 --- /dev/null +++ b/docs/doc_examples/eb4e43b47867b54214a8630172dd0e21.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteForecast({ + job_id: "total-requests", + forecast_id: "_all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc b/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc new file mode 100644 index 000000000..027c4ff88 --- /dev/null +++ b/docs/doc_examples/eb54506fbc71a7d250e86b22d0600114.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.list({ + service_type: "sharepoint_online,google_drive", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb5486d2fe4283475bf9e0e09280be16.asciidoc b/docs/doc_examples/eb5486d2fe4283475bf9e0e09280be16.asciidoc new file mode 100644 index 000000000..adf2ca795 --- /dev/null +++ b/docs/doc_examples/eb5486d2fe4283475bf9e0e09280be16.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + forcemerge: { + max_num_segments: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb5987b58dae90c3a8a1609410be0570.asciidoc b/docs/doc_examples/eb5987b58dae90c3a8a1609410be0570.asciidoc new file mode 100644 index 000000000..3d37bbfbb --- /dev/null +++ b/docs/doc_examples/eb5987b58dae90c3a8a1609410be0570.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "indonesian_example", + settings: { + analysis: { + filter: { + indonesian_stop: { + type: "stop", + stopwords: "_indonesian_", + }, + indonesian_keywords: { + type: "keyword_marker", + keywords: ["contoh"], + }, + indonesian_stemmer: { + type: "stemmer", + language: "indonesian", + }, + }, + analyzer: { + rebuilt_indonesian: { + tokenizer: "standard", + filter: [ + "lowercase", + "indonesian_stop", + 
"indonesian_keywords", + "indonesian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb6d62f1d855a8e8fe9eab2656d47504.asciidoc b/docs/doc_examples/eb6d62f1d855a8e8fe9eab2656d47504.asciidoc new file mode 100644 index 000000000..45a7c10da --- /dev/null +++ b/docs/doc_examples/eb6d62f1d855a8e8fe9eab2656d47504.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + suggest: { + text: "obel prize", + simple_phrase: { + phrase: { + field: "title.trigram", + size: 1, + direct_generator: [ + { + field: "title.trigram", + suggest_mode: "always", + }, + { + field: "title.reverse", + suggest_mode: "always", + pre_filter: "reverse", + post_filter: "reverse", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc b/docs/doc_examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc new file mode 100644 index 000000000..7a8337ffa --- /dev/null +++ b/docs/doc_examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.get({ + repository: "my_repository", + snapshot: "snapshot*", + size: 2, + sort: "name", + after: "c25hcHNob3RfMixteV9yZXBvc2l0b3J5LHNuYXBzaG90XzI=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc b/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc new file mode 100644 index 000000000..9f2187174 --- /dev/null +++ b/docs/doc_examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateScheduling({ + connector_id: "my-connector", + scheduling: { + full: { + enabled: true, + interval: "0 10 0 * * ?", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc b/docs/doc_examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc new file mode 100644 index 000000000..a9cdeb962 --- /dev/null +++ b/docs/doc_examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.bulkPutRole({ + roles: { + my_admin_role: { + cluster: ["bad_cluster_privilege"], + indices: [ + { + names: ["index1", "index2"], + privileges: ["all"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, + }, + my_user_role: { + cluster: ["all"], + indices: [ + { + names: ["index1"], + privileges: ["read"], + field_security: { + grant: ["title", "body"], + }, + query: '{"match": {"title": "foo"}}', + }, + ], + applications: [ + { + application: "myapp", + privileges: ["admin", "read"], + resources: ["*"], + }, + ], + run_as: ["other_user"], + metadata: { + version: 1, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc b/docs/doc_examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc new file mode 100644 index 000000000..9c53844b2 --- /dev/null +++ b/docs/doc_examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + index: { + number_of_routing_shards: 30, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ebb6b59fbc9325c17e45f524602d6be2.asciidoc b/docs/doc_examples/ebb6b59fbc9325c17e45f524602d6be2.asciidoc deleted file mode 100644 index 061466a85..000000000 --- a/docs/doc_examples/ebb6b59fbc9325c17e45f524602d6be2.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.deleteByQuery({ - index: 'twitter', - body: { - query: { - match: { - message: 'some message' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc b/docs/doc_examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc new file mode 100644 index 000000000..71d69b8bd --- /dev/null +++ b/docs/doc_examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.deletePrivileges({ + application: "myapp", + name: "read", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ebef3dc8ed1766d433a5cffc40fde7ae.asciidoc b/docs/doc_examples/ebef3dc8ed1766d433a5cffc40fde7ae.asciidoc new file mode 100644 index 000000000..5a6237882 --- /dev/null +++ b/docs/doc_examples/ebef3dc8ed1766d433a5cffc40fde7ae.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.removePolicy({ + index: "logs-my_app-default", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc b/docs/doc_examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc new file mode 100644 index 000000000..39544c478 --- /dev/null +++ b/docs/doc_examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + query: + '\n process where (process.name == "cmd.exe" and process.pid != 2013)\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc b/docs/doc_examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc new file mode 100644 index 000000000..d130ff537 --- /dev/null +++ b/docs/doc_examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.createFrom({ + source: ".ml-anomalies-custom-example", + dest: ".reindexed-v9-ml-anomalies-custom-example", + create_from: null, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/ec195297eb804cba1cb19c9926773059.asciidoc b/docs/doc_examples/ec195297eb804cba1cb19c9926773059.asciidoc new file mode 100644 index 000000000..4fcadbc73 --- /dev/null +++ b/docs/doc_examples/ec195297eb804cba1cb19c9926773059.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "mylogs-pre-ilm*", + settings: { + index: { + lifecycle: { + name: "mylogs_policy_existing", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec27afee074001b0e4e393611010842b.asciidoc b/docs/doc_examples/ec27afee074001b0e4e393611010842b.asciidoc deleted file mode 100644 index 8fe4fee7b..000000000 --- a/docs/doc_examples/ec27afee074001b0e4e393611010842b.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - gauss: { - date: { - origin: '2013-09-17', - scale: '10d', - offset: '5d', - decay: 0.5 - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ec420b28e327f332c9e99d6040c4eb3f.asciidoc b/docs/doc_examples/ec420b28e327f332c9e99d6040c4eb3f.asciidoc new file mode 100644 index 000000000..80f12e99a --- /dev/null +++ b/docs/doc_examples/ec420b28e327f332c9e99d6040c4eb3f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "users", + id: 0, + pipeline: "postal_lookup", + document: { + first_name: "Mardy", + last_name: "Brown", + geo_location: "POINT (13.5 52.5)", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec44999b6618ac6bbacb23eb08c0fa88.asciidoc b/docs/doc_examples/ec44999b6618ac6bbacb23eb08c0fa88.asciidoc new file mode 100644 index 000000000..b4eb794c2 --- /dev/null +++ b/docs/doc_examples/ec44999b6618ac6bbacb23eb08c0fa88.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + runtime_mappings: { + gc_size: { + type: "keyword", + script: + "\n Map gc=dissect('[%{@timestamp}][%{code}][%{desc}] %{ident} used %{usize}, capacity %{csize}, committed %{comsize}, reserved %{rsize}').extract(doc[\"gc.keyword\"].value);\n if (gc != null) emit(\"used\" + ' ' + gc.usize + ', ' + \"capacity\" + ' ' + gc.csize + ', ' + \"committed\" + ' ' + gc.comsize);\n ", + }, + }, + size: 1, + aggs: { + sizes: { + terms: { + field: "gc_size", + size: 10, + }, + }, + }, + fields: ["gc_size"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec473de07fe89bcbac1f8e278617fe46.asciidoc b/docs/doc_examples/ec473de07fe89bcbac1f8e278617fe46.asciidoc deleted file mode 100644 index 9d15fe856..000000000 --- a/docs/doc_examples/ec473de07fe89bcbac1f8e278617fe46.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - function_score: { - query: { - match: { - message: 'elasticsearch' - } - }, - script_score: { - script: { - 
source: "Math.log(2 + doc['likes'].value)" - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc b/docs/doc_examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc new file mode 100644 index 000000000..5c1d8b6ed --- /dev/null +++ b/docs/doc_examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "*", + settings: { + "index.indexing.slowlog.include.user": true, + "index.indexing.slowlog.threshold.index.warn": "30s", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc b/docs/doc_examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc new file mode 100644 index 000000000..311487347 --- /dev/null +++ b/docs/doc_examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "set_ingest_time", + description: "Set ingest timestamp.", + processors: [ + { + set: { + field: "event.ingested", + value: "{{{_ingest.timestamp}}}", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec69543e39c1f6afb5aff6fb9adc400d.asciidoc b/docs/doc_examples/ec69543e39c1f6afb5aff6fb9adc400d.asciidoc new file mode 100644 index 000000000..8a6eb97c8 --- /dev/null +++ b/docs/doc_examples/ec69543e39c1f6afb5aff6fb9adc400d.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "index1", + refresh: "true", + operations: [ + { + index: { + _id: "doc1", + }, + }, + { + comment: "run with scissors", + }, + { + index: { + _id: "doc2", + }, + }, + { + comment: "running with scissors", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ec736c31f49c54e5424efa2e53b22906.asciidoc b/docs/doc_examples/ec736c31f49c54e5424efa2e53b22906.asciidoc new file mode 100644 index 000000000..9dd498c1a --- /dev/null +++ b/docs/doc_examples/ec736c31f49c54e5424efa2e53b22906.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "user_agent", + description: "Add user agent information", + processors: [ + { + user_agent: { + field: "agent", + }, + }, + ], +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: "my_id", + pipeline: "user_agent", + document: { + agent: + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36", + }, +}); +console.log(response1); + +const response2 = await client.get({ + index: "my-index-000001", + id: "my_id", +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ec8f176ebf436d5719bdeca4a9ea8220.asciidoc b/docs/doc_examples/ec8f176ebf436d5719bdeca4a9ea8220.asciidoc new file mode 100644 index 000000000..454c2c49c --- /dev/null +++ b/docs/doc_examples/ec8f176ebf436d5719bdeca4a9ea8220.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + runtime_mappings: { + "genre.length": { + type: "long", + script: "emit(doc['genre'].value.length())", + }, + }, + aggs: { + genres_and_products: { + multi_terms: { + terms: [ + { + field: "genre.length", + }, + { + field: "product", + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ecc57597f6b791d1151ad79d9f4ce67b.asciidoc b/docs/doc_examples/ecc57597f6b791d1151ad79d9f4ce67b.asciidoc new file mode 100644 index 000000000..57ec2e036 --- /dev/null +++ b/docs/doc_examples/ecc57597f6b791d1151ad79d9f4ce67b.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + calendar_interval: "1M", + format: "yyyy-MM-dd", + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ece01f9382e450f669c0e0925e5b30e5.asciidoc b/docs/doc_examples/ece01f9382e450f669c0e0925e5b30e5.asciidoc new file mode 100644 index 000000000..d33db1d05 --- /dev/null +++ b/docs/doc_examples/ece01f9382e450f669c0e0925e5b30e5.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + range: { + date_range: { + field: "date", + format: "MM-yyy", + ranges: [ + { + to: "now-10M/M", + }, + { + from: "now-10M/M", + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc b/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc new file mode 100644 index 000000000..3622004ac --- /dev/null +++ b/docs/doc_examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateError({ + connector_id: "my-connector", + error: "Houston, we have a problem!", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed01b542bb56b1521ea8d5a3c67aa891.asciidoc b/docs/doc_examples/ed01b542bb56b1521ea8d5a3c67aa891.asciidoc new file mode 100644 index 000000000..4fccf4f6e --- /dev/null +++ b/docs/doc_examples/ed01b542bb56b1521ea8d5a3c67aa891.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_gcs_repository", + repository: { + type: "gcs", + settings: { + bucket: "my_bucket", + client: "my_alternate_client", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed01d27b8f80bb4ea54bf4e32b8d6258.asciidoc b/docs/doc_examples/ed01d27b8f80bb4ea54bf4e32b8d6258.asciidoc new file mode 100644 index 000000000..a5283d696 --- /dev/null +++ b/docs/doc_examples/ed01d27b8f80bb4ea54bf4e32b8d6258.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: 
"museums", + size: 0, + aggs: { + rings_around_amsterdam: { + geo_distance: { + field: "location", + origin: "POINT (4.894 52.3760)", + ranges: [ + { + to: 100000, + key: "first_ring", + }, + { + from: 100000, + to: 300000, + key: "second_ring", + }, + { + from: 300000, + key: "third_ring", + }, + ], + keyed: true, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc b/docs/doc_examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc new file mode 100644 index 000000000..6e565f33e --- /dev/null +++ b/docs/doc_examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.reloadSearchAnalyzers({ + index: "my-index-000001", +}); +console.log(response); + +const response1 = await client.indices.clearCache({ + index: "my-index-000001", + request: "true", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/ed12eeadb4e530b53c4975dadaa06054.asciidoc b/docs/doc_examples/ed12eeadb4e530b53c4975dadaa06054.asciidoc new file mode 100644 index 000000000..2fe91ad53 --- /dev/null +++ b/docs/doc_examples/ed12eeadb4e530b53c4975dadaa06054.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.processorGrok({ + ecs_compatibility: "v1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed250b74bc77c15bb794f55a12d762c3.asciidoc b/docs/doc_examples/ed250b74bc77c15bb794f55a12d762c3.asciidoc new file mode 100644 index 000000000..18ac24524 --- /dev/null +++ b/docs/doc_examples/ed250b74bc77c15bb794f55a12d762c3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.info({ + filter_path: "**.mlockall", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed27843eff311f3011b679e97e6fda50.asciidoc b/docs/doc_examples/ed27843eff311f3011b679e97e6fda50.asciidoc new file mode 100644 index 000000000..c6ecce6fa --- /dev/null +++ b/docs/doc_examples/ed27843eff311f3011b679e97e6fda50.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "my_snapshot_2099.05.06", + indices: "my-index,logs-my_app-default", + index_settings: { + "index.number_of_replicas": 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc b/docs/doc_examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc new file mode 100644 index 000000000..ed11c2ffd --- /dev/null +++ b/docs/doc_examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getFieldMapping({ + index: "publications", + fields: "author.id,abstract,name", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc b/docs/doc_examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc new file mode 100644 index 
000000000..dfa1e1323 --- /dev/null +++ b/docs/doc_examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.master({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed5c3b45e8de912faba44507d827eb93.asciidoc b/docs/doc_examples/ed5c3b45e8de912faba44507d827eb93.asciidoc new file mode 100644 index 000000000..0ce9c3072 --- /dev/null +++ b/docs/doc_examples/ed5c3b45e8de912faba44507d827eb93.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": "POINT (-70 40)", + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed60daeaec351fc8b3f39a3dfad6fc4e.asciidoc b/docs/doc_examples/ed60daeaec351fc8b3f39a3dfad6fc4e.asciidoc new file mode 100644 index 000000000..724c1f0c0 --- /dev/null +++ b/docs/doc_examples/ed60daeaec351fc8b3f39a3dfad6fc4e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "amazon-bedrock-embeddings", + mappings: { + properties: { + content_embedding: { + type: "dense_vector", + dims: 1024, + element_type: "float", + similarity: "dot_product", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed688d86eeaa4d7969acb0f574eb917f.asciidoc b/docs/doc_examples/ed688d86eeaa4d7969acb0f574eb917f.asciidoc new file mode 100644 index 000000000..1dce69225 --- /dev/null +++ b/docs/doc_examples/ed688d86eeaa4d7969acb0f574eb917f.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my_queries1", + id: 1, + refresh: "true", + document: { + query: { + term: { + "my_field.prefix": "abc", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed6b996ea389e0955a01c2e67f4c8339.asciidoc b/docs/doc_examples/ed6b996ea389e0955a01c2e67f4c8339.asciidoc new file mode 100644 index 000000000..5e627dec3 --- /dev/null +++ b/docs/doc_examples/ed6b996ea389e0955a01c2e67f4c8339.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.fieldCaps({ + index: "my-index-000001", + fields: "my-field", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ed7fa1971ac322aeccd6391ab32d0490.asciidoc b/docs/doc_examples/ed7fa1971ac322aeccd6391ab32d0490.asciidoc new file mode 100644 index 000000000..da6e6d943 --- /dev/null +++ b/docs/doc_examples/ed7fa1971ac322aeccd6391ab32d0490.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.nodes({ + v: "true", + h: "name,master,node.role,disk.used_percent,disk.used,disk.avail,disk.total", +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/ed85ed833bec7286a0dfbe64077c5715.asciidoc b/docs/doc_examples/ed85ed833bec7286a0dfbe64077c5715.asciidoc new file mode 100644 index 000000000..08ac02a73 --- /dev/null +++ b/docs/doc_examples/ed85ed833bec7286a0dfbe64077c5715.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "danish_example", + settings: { + analysis: { + filter: { + danish_stop: { + type: "stop", + stopwords: "_danish_", + }, + danish_keywords: { + type: "keyword_marker", + keywords: ["eksempel"], + }, + danish_stemmer: { + type: "stemmer", + language: "danish", + }, + }, + analyzer: { + rebuilt_danish: { + tokenizer: "standard", + filter: [ + "lowercase", + "danish_stop", + "danish_keywords", + "danish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/edae616e1244babf6032aecc6aaaf836.asciidoc b/docs/doc_examples/edae616e1244babf6032aecc6aaaf836.asciidoc new file mode 100644 index 000000000..72c6aa669 --- /dev/null +++ b/docs/doc_examples/edae616e1244babf6032aecc6aaaf836.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": { + lat: 40, + lon: -70, + }, + order: "asc", + unit: "km", + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/edb25dc0162b039d477cb06aed2d6275.asciidoc b/docs/doc_examples/edb25dc0162b039d477cb06aed2d6275.asciidoc new file mode 100644 index 000000000..31c711aa2 --- /dev/null +++ b/docs/doc_examples/edb25dc0162b039d477cb06aed2d6275.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + bool: { + should: [ + { + sparse_vector: { + field: "ml.inference.title_expanded.predicted_value", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + boost: 1, + }, + }, + { + sparse_vector: { + field: "ml.inference.description_expanded.predicted_value", + inference_id: "my-elser-model", + query: "How is the weather in Jamaica?", + boost: 1, + }, + }, + { + multi_match: { + query: "How is the weather in Jamaica?", + fields: ["title", "description"], + boost: 4, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/edb5cad890208014ecd91f3f739ce193.asciidoc b/docs/doc_examples/edb5cad890208014ecd91f3f739ce193.asciidoc new file mode 100644 index 000000000..2bb41773d --- /dev/null +++ b/docs/doc_examples/edb5cad890208014ecd91f3f739ce193.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "metrics-weather_sensors-dev", +}); +console.log(response); +---- diff --git a/docs/doc_examples/edcfadbfb14d97a2f5e6e21ef7039818.asciidoc b/docs/doc_examples/edcfadbfb14d97a2f5e6e21ef7039818.asciidoc new file mode 100644 index 000000000..f3065ff0d --- /dev/null +++ b/docs/doc_examples/edcfadbfb14d97a2f5e6e21ef7039818.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use 
`node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + query: { + match_all: {}, + }, + boost: "5", + functions: [ + { + filter: { + match: { + test: "bar", + }, + }, + random_score: {}, + weight: 23, + }, + { + filter: { + match: { + test: "cat", + }, + }, + weight: 42, + }, + ], + max_boost: 42, + score_mode: "max", + boost_mode: "multiply", + min_score: 42, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee08328cd157d547de19b4abe867b23e.asciidoc b/docs/doc_examples/ee08328cd157d547de19b4abe867b23e.asciidoc new file mode 100644 index 000000000..a2bbbd116 --- /dev/null +++ b/docs/doc_examples/ee08328cd157d547de19b4abe867b23e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getAlias({ + name: "logs", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee0fd67acc807f1bddf5e9807c06e7eb.asciidoc b/docs/doc_examples/ee0fd67acc807f1bddf5e9807c06e7eb.asciidoc new file mode 100644 index 000000000..9e94a56b4 --- /dev/null +++ b/docs/doc_examples/ee0fd67acc807f1bddf5e9807c06e7eb.asciidoc @@ -0,0 +1,95 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + query: { + weighted_tokens: { + query_expansion_field: { + tokens: { + "2161": 0.4679, + "2621": 0.307, + "2782": 0.1299, + "2851": 0.1056, + "3088": 0.3041, + "3376": 0.1038, + "3467": 0.4873, + "3684": 0.8958, + "4380": 0.334, + "4542": 0.4636, + "4633": 2.2805, + "4785": 1.2628, + "4860": 1.0655, + "5133": 1.0709, + "7139": 1.0016, + "7224": 0.2486, + "7387": 0.0985, + "7394": 0.0542, + "8915": 0.369, + "9156": 2.8947, + "10505": 0.2771, + "11464": 0.3996, + "13525": 0.0088, + "14178": 0.8161, + "16893": 0.1376, + "17851": 1.5348, + "19939": 0.6012, + }, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: false, + }, + }, + }, + }, + rescore: { + window_size: 100, + query: { + rescore_query: { + weighted_tokens: { + query_expansion_field: { + tokens: { + "2161": 0.4679, + "2621": 0.307, + "2782": 0.1299, + "2851": 0.1056, + "3088": 0.3041, + "3376": 0.1038, + "3467": 0.4873, + "3684": 0.8958, + "4380": 0.334, + "4542": 0.4636, + "4633": 2.2805, + "4785": 1.2628, + "4860": 1.0655, + "5133": 1.0709, + "7139": 1.0016, + "7224": 0.2486, + "7387": 0.0985, + "7394": 0.0542, + "8915": 0.369, + "9156": 2.8947, + "10505": 0.2771, + "11464": 0.3996, + "13525": 0.0088, + "14178": 0.8161, + "16893": 0.1376, + "17851": 1.5348, + "19939": 0.6012, + }, + pruning_config: { + tokens_freq_ratio_threshold: 5, + tokens_weight_threshold: 0.4, + only_score_pruned_tokens: true, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee223e604bb695cad2517d28ae63ac34.asciidoc b/docs/doc_examples/ee223e604bb695cad2517d28ae63ac34.asciidoc new file mode 100644 index 000000000..907093605 --- /dev/null +++ b/docs/doc_examples/ee223e604bb695cad2517d28ae63ac34.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example-index", + retriever: { + rrf: 
{ + retrievers: [ + { + standard: { + query: { + term: { + text: "shoes", + }, + }, + }, + }, + { + knn: { + field: "vector", + query_vector: [1.25, 2, 3.5], + k: 50, + num_candidates: 100, + }, + }, + ], + rank_window_size: 50, + rank_constant: 20, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee2d97090d617ed8aa2a87ea33556dd7.asciidoc b/docs/doc_examples/ee2d97090d617ed8aa2a87ea33556dd7.asciidoc new file mode 100644 index 000000000..6b15589c5 --- /dev/null +++ b/docs/doc_examples/ee2d97090d617ed8aa2a87ea33556dd7.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "whitespace", + filter: ["truncate"], + text: "the quinquennial extravaganza carried on", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc b/docs/doc_examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc new file mode 100644 index 000000000..f643646c4 --- /dev/null +++ b/docs/doc_examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedRoles({ + name: "my_admin_role", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee634d59def6302134d24fa90e18b609.asciidoc b/docs/doc_examples/ee634d59def6302134d24fa90e18b609.asciidoc new file mode 100644 index 000000000..7c7070cf7 --- /dev/null +++ b/docs/doc_examples/ee634d59def6302134d24fa90e18b609.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.autoscaling.putAutoscalingPolicy({ + name: "my_autoscaling_policy", + policy: { + roles: ["ml"], + deciders: { + ml: { + num_anomaly_jobs_in_queue: 5, + num_analytics_jobs_in_queue: 3, + down_scale_delay: "30m", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ee90d1fb22b59d30da339d825303b912.asciidoc b/docs/doc_examples/ee90d1fb22b59d30da339d825303b912.asciidoc new file mode 100644 index 000000000..40f37b043 --- /dev/null +++ b/docs/doc_examples/ee90d1fb22b59d30da339d825303b912.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putPrivileges({ + privileges: { + app01: { + read: { + actions: ["action:login", "data:read/*"], + }, + write: { + actions: ["action:login", "data:write/*"], + }, + }, + app02: { + all: { + actions: ["*"], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eeb35b759bd239bb773c8ebd5fe63d05.asciidoc b/docs/doc_examples/eeb35b759bd239bb773c8ebd5fe63d05.asciidoc new file mode 100644 index 000000000..429756a85 --- /dev/null +++ b/docs/doc_examples/eeb35b759bd239bb773c8ebd5fe63d05.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "museums", + size: 0, + aggs: { + cities: { + terms: { + field: "city.keyword", + }, + aggs: { + centroid: { + geo_centroid: { + field: "location", + }, + }, + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc b/docs/doc_examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc new file mode 100644 index 000000000..e6563395a --- /dev/null +++ b/docs/doc_examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + from: 40, + size: 20, + query: { + term: { + "user.id": "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eed37703cfe8fec093ed5a42210a6ffd.asciidoc b/docs/doc_examples/eed37703cfe8fec093ed5a42210a6ffd.asciidoc new file mode 100644 index 000000000..4ca308a64 --- /dev/null +++ b/docs/doc_examples/eed37703cfe8fec093ed5a42210a6ffd.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.putJob({ + id: "sensor", + index_pattern: "sensor-*", + rollup_index: "sensor_rollup", + cron: "*/30 * * * * ?", + page_size: 1000, + groups: { + date_histogram: { + field: "timestamp", + fixed_interval: "60m", + }, + terms: { + fields: ["node"], + }, + }, + metrics: [ + { + field: "temperature", + metrics: ["min", "max", "sum"], + }, + { + field: "voltage", + metrics: ["avg"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc b/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc new file mode 100644 index 000000000..d414b928c --- /dev/null +++ b/docs/doc_examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "hugging-face-embeddings", + inference_config: { + service: "hugging_face", + service_settings: { + api_key: "", + url: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eef9deff7f9799d1f7657bb7e2afb7f1.asciidoc b/docs/doc_examples/eef9deff7f9799d1f7657bb7e2afb7f1.asciidoc new file mode 100644 index 000000000..535a66e39 --- /dev/null +++ b/docs/doc_examples/eef9deff7f9799d1f7657bb7e2afb7f1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "*", + expand_wildcards: "all", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef0f4fa4272c47ff62fb7b422cf975e7.asciidoc b/docs/doc_examples/ef0f4fa4272c47ff62fb7b422cf975e7.asciidoc deleted file mode 100644 index 7f7ab93ab..000000000 --- a/docs/doc_examples/ef0f4fa4272c47ff62fb7b422cf975e7.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - sort: [ - { - price: { - missing: '_last' - } - } - ], - query: { - term: { - product: 'chocolate' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc b/docs/doc_examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc new file mode 100644 index 
000000000..ebdd4ee45 --- /dev/null +++ b/docs/doc_examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "synonym_graph", + synonyms: ["dns, domain name system"], + }, + "flatten_graph", + ], + text: "domain name system is fragile", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc b/docs/doc_examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc new file mode 100644 index 000000000..5e496f0cf --- /dev/null +++ b/docs/doc_examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + max_docs: 1, + source: { + index: "my-index-000001", + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef33b3b373f7040b874146599db5d557.asciidoc b/docs/doc_examples/ef33b3b373f7040b874146599db5d557.asciidoc new file mode 100644 index 000000000..cce76bc0b --- /dev/null +++ b/docs/doc_examples/ef33b3b373f7040b874146599db5d557.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["lowercase"], + text: "this is a test", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef3666b5d288faefbcbc4a25e8f506da.asciidoc b/docs/doc_examples/ef3666b5d288faefbcbc4a25e8f506da.asciidoc new file mode 100644 index 000000000..39568eb32 --- /dev/null +++ b/docs/doc_examples/ef3666b5d288faefbcbc4a25e8f506da.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.count({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef46c42d473b2acc151a6a41272e0f14.asciidoc b/docs/doc_examples/ef46c42d473b2acc151a6a41272e0f14.asciidoc new file mode 100644 index 000000000..5fbb3c8b7 --- /dev/null +++ b/docs/doc_examples/ef46c42d473b2acc151a6a41272e0f14.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic: "runtime", + runtime: { + day_of_week: { + type: "keyword", + script: { + source: + "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))", + }, + }, + }, + properties: { + "@timestamp": { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc b/docs/doc_examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc new file mode 100644 index 000000000..3f692157d --- /dev/null +++ b/docs/doc_examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "books", + 
document: { + name: "The Great Gatsby", + author: "F. Scott Fitzgerald", + release_date: "1925-04-10", + page_count: 180, + language: "EN", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef779b87b3b0fb6e6bae9c8875e3a1cf.asciidoc b/docs/doc_examples/ef779b87b3b0fb6e6bae9c8875e3a1cf.asciidoc new file mode 100644 index 000000000..7578ffc46 --- /dev/null +++ b/docs/doc_examples/ef779b87b3b0fb6e6bae9c8875e3a1cf.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + runtime_mappings: { + "date.promoted_is_tomorrow": { + type: "date", + script: + "\n long date = doc['date'].value.toInstant().toEpochMilli();\n if (doc['promoted'].value) {\n date += 86400;\n }\n emit(date);\n ", + }, + }, + aggs: { + sales_over_time: { + date_histogram: { + field: "date.promoted_is_tomorrow", + calendar_interval: "1M", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef867e563cbffe7866769a096b5d7a92.asciidoc b/docs/doc_examples/ef867e563cbffe7866769a096b5d7a92.asciidoc new file mode 100644 index 000000000..5f0cb29b7 --- /dev/null +++ b/docs/doc_examples/ef867e563cbffe7866769a096b5d7a92.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_per_month: { + date_histogram: { + field: "date", + calendar_interval: "month", + }, + aggs: { + sales: { + sum: { + field: "price", + }, + }, + cumulative_sales: { + cumulative_sum: { + buckets_path: "sales", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef8f30e85e12e9a5a8817d28977598e4.asciidoc b/docs/doc_examples/ef8f30e85e12e9a5a8817d28977598e4.asciidoc new file mode 100644 index 000000000..5971b624b --- /dev/null +++ b/docs/doc_examples/ef8f30e85e12e9a5a8817d28977598e4.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + aggs: { + price_ranges: { + range: { + field: "price", + ranges: [ + { + to: 100, + }, + { + from: 100, + to: 200, + }, + { + from: 200, + }, + ], + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ef9111c1648d7820925f12e07d1346c5.asciidoc b/docs/doc_examples/ef9111c1648d7820925f12e07d1346c5.asciidoc deleted file mode 100644 index 384df2da6..000000000 --- a/docs/doc_examples/ef9111c1648d7820925f12e07d1346c5.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'my_index', - body: { - mappings: { - properties: { - my_field: { - type: 'text', - fields: { - keyword: { - type: 'keyword' - } - } - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/ef9c29759459904fef162acd223462c4.asciidoc b/docs/doc_examples/ef9c29759459904fef162acd223462c4.asciidoc new file mode 100644 index 000000000..674ca8a30 --- /dev/null +++ b/docs/doc_examples/ef9c29759459904fef162acd223462c4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "ingest", + filter_path: "nodes.*.ingest", +}); +console.log(response); +---- diff --git a/docs/doc_examples/efa146bf81a9351ba42b92a6decbcfee.asciidoc b/docs/doc_examples/efa146bf81a9351ba42b92a6decbcfee.asciidoc new file mode 100644 index 000000000..78f36d9c1 --- /dev/null +++ b/docs/doc_examples/efa146bf81a9351ba42b92a6decbcfee.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-index", + runtime: { + "http.response": { + type: "long", + script: + '\n String response=dissect(\'%{clientip} %{ident} %{auth} [%{@timestamp}] "%{verb} %{request} HTTP/%{httpversion}" %{response} %{size}\').extract(doc["message"].value)?.response;\n if (response != null) emit(Integer.parseInt(response));\n ', + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/efa924638043f3a6b23ccb824d757eba.asciidoc b/docs/doc_examples/efa924638043f3a6b23ccb824d757eba.asciidoc new file mode 100644 index 000000000..70c4d55ae --- /dev/null +++ b/docs/doc_examples/efa924638043f3a6b23ccb824d757eba.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "mv", + refresh: "true", + operations: [ + { + index: {}, + }, + { + a: 1, + b: [2, 1], + }, + { + index: {}, + }, + { + a: 2, + b: 3, + }, + ], +}); +console.log(response); + +const response1 = await client.esql.query({ + query: "FROM mv | LIMIT 2", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/efbd4936cca1a752493d8fa2ba6ad1a3.asciidoc b/docs/doc_examples/efbd4936cca1a752493d8fa2ba6ad1a3.asciidoc new file mode 100644 index 000000000..0dca28ff5 --- /dev/null +++ b/docs/doc_examples/efbd4936cca1a752493d8fa2ba6ad1a3.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + runtime: { + day_of_week: { + type: "keyword", + script: { + source: + "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))", + }, + }, + }, + properties: { + "@timestamp": { + type: "date", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eff2fc92d46eb3c8f4d424eed18f54a2.asciidoc b/docs/doc_examples/eff2fc92d46eb3c8f4d424eed18f54a2.asciidoc new file mode 100644 index 000000000..8ea87e2e1 --- /dev/null +++ b/docs/doc_examples/eff2fc92d46eb3c8f4d424eed18f54a2.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + query: { + match_all: {}, + }, + boost: "5", + random_score: {}, + boost_mode: "multiply", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/eff8ecaed1ed084909c64450fc363a20.asciidoc b/docs/doc_examples/eff8ecaed1ed084909c64450fc363a20.asciidoc new file mode 100644 index 000000000..e612615ea --- /dev/null +++ b/docs/doc_examples/eff8ecaed1ed084909c64450fc363a20.asciidoc @@ -0,0 +1,12 @@ +// This file is 
autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + transient: { + "indices.recovery.max_bytes_per_sec": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f03352bb1129938a89f97e4b650038dd.asciidoc b/docs/doc_examples/f03352bb1129938a89f97e4b650038dd.asciidoc new file mode 100644 index 000000000..f39a62d6f --- /dev/null +++ b/docs/doc_examples/f03352bb1129938a89f97e4b650038dd.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "amazon_bedrock_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "amazon_bedrock_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc b/docs/doc_examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc new file mode 100644 index 000000000..983f0e8c1 --- /dev/null +++ b/docs/doc_examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.delete({ + repository: "my_repository", + snapshot: "my_snapshot", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc b/docs/doc_examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc new file mode 100644 index 000000000..1ae0987ba --- /dev/null +++ b/docs/doc_examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.fielddata({ + fields: "body,soul", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f085fb032dae56a3b104ab874eaea2ad.asciidoc b/docs/doc_examples/f085fb032dae56a3b104ab874eaea2ad.asciidoc deleted file mode 100644 index e3f1b5d1a..000000000 --- a/docs/doc_examples/f085fb032dae56a3b104ab874eaea2ad.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - aggs: { - tags: { - terms: { - field: 'tags', - missing: 'N/A' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc b/docs/doc_examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc new file mode 100644 index 000000000..af615dc9b --- /dev/null +++ b/docs/doc_examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "swedish_example", + settings: { + analysis: { + filter: { + swedish_stop: { + type: "stop", + stopwords: "_swedish_", + }, + swedish_keywords: { + type: "keyword_marker", + keywords: ["exempel"], + }, + swedish_stemmer: { + type: "stemmer", + language: "swedish", + }, + }, + analyzer: { + rebuilt_swedish: { + tokenizer: "standard", + filter: [ + 
"lowercase", + "swedish_stop", + "swedish_keywords", + "swedish_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f09817fd13ff3dce52eb79d0722409c3.asciidoc b/docs/doc_examples/f09817fd13ff3dce52eb79d0722409c3.asciidoc new file mode 100644 index 000000000..d844fc1ee --- /dev/null +++ b/docs/doc_examples/f09817fd13ff3dce52eb79d0722409c3.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "new_index", + mappings: { + properties: { + query: { + type: "percolator", + }, + body: { + type: "text", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.reindex({ + refresh: "true", + source: { + index: "index", + }, + dest: { + index: "new_index", + }, +}); +console.log(response1); + +const response2 = await client.indices.updateAliases({ + actions: [ + { + remove: { + index: "index", + alias: "queries", + }, + }, + { + add: { + index: "new_index", + alias: "queries", + }, + }, + ], +}); +console.log(response2); +---- diff --git a/docs/doc_examples/f0bfc8d7ab4eb94ea5fdf2e087d8cf5b.asciidoc b/docs/doc_examples/f0bfc8d7ab4eb94ea5fdf2e087d8cf5b.asciidoc new file mode 100644 index 000000000..14f0cc66e --- /dev/null +++ b/docs/doc_examples/f0bfc8d7ab4eb94ea5fdf2e087d8cf5b.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "latency", + size: 0, + runtime_mappings: { + "load_time.seconds": { + type: "long", + script: { + source: "emit(doc['load_time'].value / params.timeUnit)", + params: { + timeUnit: 1000, + }, + }, + }, + }, + aggs: { + load_time_boxplot: { + boxplot: { + field: "load_time.seconds", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f0c3235d8fce641d6ff8ce90ab7b7b8b.asciidoc b/docs/doc_examples/f0c3235d8fce641d6ff8ce90ab7b7b8b.asciidoc new file mode 100644 index 000000000..abc598b1e --- /dev/null +++ b/docs/doc_examples/f0c3235d8fce641d6ff8ce90ab7b7b8b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mtermvectors({ + index: "my-index-000001", + ids: ["1", "2"], + parameters: { + fields: ["message"], + term_statistics: true, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f0e21e03a07c8fa0209b0aafdb3791e6.asciidoc b/docs/doc_examples/f0e21e03a07c8fa0209b0aafdb3791e6.asciidoc deleted file mode 100644 index 054b26dd6..000000000 --- a/docs/doc_examples/f0e21e03a07c8fa0209b0aafdb3791e6.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test1', - alias: 'alias1' - } - }, - { - add: { - index: 'test2', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f10ab582387b2c157917a60205c993f7.asciidoc b/docs/doc_examples/f10ab582387b2c157917a60205c993f7.asciidoc new file mode 100644 index 000000000..26a38e4e8 --- /dev/null +++ b/docs/doc_examples/f10ab582387b2c157917a60205c993f7.asciidoc @@ -0,0 +1,20 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + latency: { + type: "long", + meta: { + unit: "ms", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc b/docs/doc_examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc new file mode 100644 index 000000000..c0c85c658 --- /dev/null +++ b/docs/doc_examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.rollover({ + alias: "my-data-stream", + conditions: { + max_age: "7d", + max_docs: 1000, + max_primary_shard_size: "50gb", + max_primary_shard_docs: "2000", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f14d0e4a280fee540e8e5f0fc4d0e9f1.asciidoc b/docs/doc_examples/f14d0e4a280fee540e8e5f0fc4d0e9f1.asciidoc new file mode 100644 index 000000000..71dd1a095 --- /dev/null +++ b/docs/doc_examples/f14d0e4a280fee540e8e5f0fc4d0e9f1.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + size: 0, + aggs: { + grouped: { + geotile_grid: { + field: "location", + precision: 6, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1508a2221152842894819e762e63491.asciidoc b/docs/doc_examples/f1508a2221152842894819e762e63491.asciidoc new file mode 100644 index 000000000..61e99fb3d --- /dev/null +++ b/docs/doc_examples/f1508a2221152842894819e762e63491.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + keep_on_completion: true, + wait_for_completion_timeout: "2s", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f160561efab38e40c2feebf5a2542ab5.asciidoc b/docs/doc_examples/f160561efab38e40c2feebf5a2542ab5.asciidoc new file mode 100644 index 000000000..313a79df9 --- /dev/null +++ b/docs/doc_examples/f160561efab38e40c2feebf5a2542ab5.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.nodes.stats({ + metric: "ingest", + filter_path: "nodes.*.ingest.pipelines", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f18248c181690b81d090275b072f0070.asciidoc b/docs/doc_examples/f18248c181690b81d090275b072f0070.asciidoc new file mode 100644 index 000000000..7753354ba --- /dev/null +++ b/docs/doc_examples/f18248c181690b81d090275b072f0070.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + keep_alive: "2d", + wait_for_completion_timeout: "2s", + query: '\n process where process.name == "cmd.exe"\n ', +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/f187ac2dc35425cb0ef48f328cc7e435.asciidoc b/docs/doc_examples/f187ac2dc35425cb0ef48f328cc7e435.asciidoc new file mode 100644 index 000000000..0498b4759 --- /dev/null +++ b/docs/doc_examples/f187ac2dc35425cb0ef48f328cc7e435.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putUser({ + username: "cross-search-user", + password: "l0ng-r4nd0m-p@ssw0rd", + roles: ["remote-search"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc b/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc new file mode 100644 index 000000000..31cf0b9d3 --- /dev/null +++ b/docs/doc_examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.rerank({ + inference_id: "cohere_rerank", + input: ["luke", "like", "leia", "chewy", "r2d2", "star", "wars"], + query: "star wars main character", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc b/docs/doc_examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc new file mode 100644 index 000000000..78223bc2b --- /dev/null +++ b/docs/doc_examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-byte-quantized-index", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 64, + index: true, + index_options: { + type: "bbq_hnsw", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1bf3edbd9e6c7e01b00c74c99a58b61.asciidoc b/docs/doc_examples/f1bf3edbd9e6c7e01b00c74c99a58b61.asciidoc new file mode 100644 index 000000000..9a4eac016 --- /dev/null +++ b/docs/doc_examples/f1bf3edbd9e6c7e01b00c74c99a58b61.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + cluster_one: { + seeds: ["127.0.0.1:9300"], + }, + cluster_two: { + seeds: ["127.0.0.1:9301"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1d2b8169160adfd27f32988113f0f9f.asciidoc b/docs/doc_examples/f1d2b8169160adfd27f32988113f0f9f.asciidoc new file mode 100644 index 000000000..c51090492 --- /dev/null +++ b/docs/doc_examples/f1d2b8169160adfd27f32988113f0f9f.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_analyzer: { + tokenizer: "keyword", + filter: ["word_delimiter"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1dc6f69453867ffafe86e998dd464d9.asciidoc b/docs/doc_examples/f1dc6f69453867ffafe86e998dd464d9.asciidoc new file mode 100644 index 000000000..5ce51ecf1 --- /dev/null +++ b/docs/doc_examples/f1dc6f69453867ffafe86e998dd464d9.asciidoc @@ -0,0 
+1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "file-path-test", + query: { + term: { + "file_path.tree_reversed": { + value: "my_photo1.jpg", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc b/docs/doc_examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc new file mode 100644 index 000000000..ebf51b9ca --- /dev/null +++ b/docs/doc_examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.resetJob({ + job_id: "total-requests", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2175feadc2abe545899889e6d4ffcad.asciidoc b/docs/doc_examples/f2175feadc2abe545899889e6d4ffcad.asciidoc new file mode 100644 index 000000000..c8e0b3205 --- /dev/null +++ b/docs/doc_examples/f2175feadc2abe545899889e6d4ffcad.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.getLifecycle({ + policy_id: "daily-snapshots", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f235544a883fd04bed2dc369b0c450f3.asciidoc b/docs/doc_examples/f235544a883fd04bed2dc369b0c450f3.asciidoc new file mode 100644 index 000000000..11b7ad26f --- /dev/null +++ b/docs/doc_examples/f235544a883fd04bed2dc369b0c450f3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "txt", + query: "SELECT * FROM library", + filter: { + terms: { + _routing: ["abc"], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2359acfb6eaa919125463cc1d3a7cd1.asciidoc b/docs/doc_examples/f2359acfb6eaa919125463cc1d3a7cd1.asciidoc new file mode 100644 index 000000000..48dedaf2a --- /dev/null +++ b/docs/doc_examples/f2359acfb6eaa919125463cc1d3a7cd1.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "admins", + roles: ["monitoring", "user"], + rules: { + field: { + groups: "cn=admins,dc=example,dc=com", + }, + }, + enabled: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f268416813befd13c604642c6fe6eda9.asciidoc b/docs/doc_examples/f268416813befd13c604642c6fe6eda9.asciidoc new file mode 100644 index 000000000..45015b079 --- /dev/null +++ b/docs/doc_examples/f268416813befd13c604642c6fe6eda9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "custom_lowercase_example", + settings: { + analysis: { + analyzer: { + greek_lowercase_example: { + type: "custom", + tokenizer: "standard", + filter: ["greek_lowercase"], + }, + }, + filter: { + greek_lowercase: { + type: "lowercase", + language: "greek", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc b/docs/doc_examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc new file mode 100644 index 000000000..fc520e33d --- /dev/null +++ b/docs/doc_examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.flush(); +console.log(response); +---- diff --git a/docs/doc_examples/f281ff50b2cdb67ac0ece93f1594fa95.asciidoc b/docs/doc_examples/f281ff50b2cdb67ac0ece93f1594fa95.asciidoc new file mode 100644 index 000000000..d280260a8 --- /dev/null +++ b/docs/doc_examples/f281ff50b2cdb67ac0ece93f1594fa95.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "example_points", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_shape: { + location: { + shape: { + type: "envelope", + coordinates: [ + [13, 53], + [14, 52], + ], + }, + relation: "intersects", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc b/docs/doc_examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc new file mode 100644 index 000000000..4f46cf2e9 --- /dev/null +++ b/docs/doc_examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.pendingTasks({ + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc b/docs/doc_examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc index 6cd9afe16..29b095fb2 100644 --- a/docs/doc_examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc +++ b/docs/doc_examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc @@ -4,39 +4,36 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - bool: { - must: [ - { - match: { - title: 'Search' - } + query: { + bool: { + must: [ + { + match: { + title: "Search", }, - { - match: { - content: 'Elasticsearch' - } - } - ], - filter: [ - { - term: { - status: 'published' - } + }, + { + match: { + content: "Elasticsearch", }, - { - range: { - publish_date: { - gte: '2015-01-01' - } - } - } - ] - } - } - } -}) -console.log(response) + }, + ], + filter: [ + { + term: { + status: "published", + }, + }, + { + range: { + publish_date: { + gte: "2015-01-01", + }, + }, + }, + ], + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/f29b2674299ddf51a25ed87619025ede.asciidoc b/docs/doc_examples/f29b2674299ddf51a25ed87619025ede.asciidoc new file mode 100644 index 000000000..3400481d1 --- /dev/null +++ b/docs/doc_examples/f29b2674299ddf51a25ed87619025ede.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.rollupSearch({ + index: "sensor_rollup", + size: 0, + aggregations: { + max_temperature: { + max: { + field: "temperature", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc b/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc new file mode 100644 index 
000000000..ab808a3c4 --- /dev/null +++ b/docs/doc_examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.get({ + connector_id: "my-connector", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2b2d62bc0a44940ad14fca57d6d008a.asciidoc b/docs/doc_examples/f2b2d62bc0a44940ad14fca57d6d008a.asciidoc new file mode 100644 index 000000000..b318357eb --- /dev/null +++ b/docs/doc_examples/f2b2d62bc0a44940ad14fca57d6d008a.asciidoc @@ -0,0 +1,106 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transform.putTransform({ + transform_id: "suspicious_client_ips", + source: { + index: "kibana_sample_data_logs", + }, + dest: { + index: "sample_weblogs_by_clientip", + }, + sync: { + time: { + field: "timestamp", + delay: "60s", + }, + }, + pivot: { + group_by: { + clientip: { + terms: { + field: "clientip", + }, + }, + }, + aggregations: { + url_dc: { + cardinality: { + field: "url.keyword", + }, + }, + bytes_sum: { + sum: { + field: "bytes", + }, + }, + "geo.src_dc": { + cardinality: { + field: "geo.src", + }, + }, + agent_dc: { + cardinality: { + field: "agent.keyword", + }, + }, + "geo.dest_dc": { + cardinality: { + field: "geo.dest", + }, + }, + "responses.total": { + value_count: { + field: "timestamp", + }, + }, + success: { + filter: { + term: { + response: "200", + }, + }, + }, + error404: { + filter: { + term: { + response: "404", + }, + }, + }, + error5xx: { + filter: { + range: { + response: { + gte: 500, + lt: 600, + }, + }, + }, + }, + "timestamp.min": { + min: { + field: "timestamp", + }, + }, + "timestamp.max": { + max: { + field: "timestamp", + }, + }, + "timestamp.duration_ms": { + bucket_script: { + buckets_path: { + min_time: "timestamp.min.value", + max_time: "timestamp.max.value", + }, + script: "(params.max_time - params.min_time)", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2c9afd052878b2ec00908739b0d0f74.asciidoc b/docs/doc_examples/f2c9afd052878b2ec00908739b0d0f74.asciidoc new file mode 100644 index 000000000..1aa9b2a81 --- /dev/null +++ b/docs/doc_examples/f2c9afd052878b2ec00908739b0d0f74.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + rename: { + description: "Rename 'provider' to 'cloud.provider'", + field: "provider", + target_field: "cloud.provider", + on_failure: [ + { + set: { + description: "Set 'error.message'", + field: "error.message", + value: + "Field 'provider' does not exist. 
Cannot rename to 'cloud.provider'", + override: false, + on_failure: [ + { + set: { + description: "Set 'error.message.multi'", + field: "error.message.multi", + value: "Document encountered multiple ingest errors", + override: true, + }, + }, + ], + }, + }, + ], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2d68493abd3ca430bd03a7f7f8d18f9.asciidoc b/docs/doc_examples/f2d68493abd3ca430bd03a7f7f8d18f9.asciidoc deleted file mode 100644 index f8bcb94ba..000000000 --- a/docs/doc_examples/f2d68493abd3ca430bd03a7f7f8d18f9.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'content', - 'name' - ], - query: 'this AND that' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f2e854b6c99659ccc1824e86c096e433.asciidoc b/docs/doc_examples/f2e854b6c99659ccc1824e86c096e433.asciidoc new file mode 100644 index 000000000..b5bc56dfb --- /dev/null +++ b/docs/doc_examples/f2e854b6c99659ccc1824e86c096e433.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.resumeAutoFollowPattern({ + name: "my_auto_follow_pattern", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc b/docs/doc_examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc new file mode 100644 index 000000000..3571da97b --- /dev/null +++ b/docs/doc_examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + mean_reciprocal_rank: { + k: 20, + relevant_rating_threshold: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc b/docs/doc_examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc new file mode 100644 index 000000000..838a4da46 --- /dev/null +++ b/docs/doc_examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.split({ + index: "my_source_index", + target: "my_target_index", + settings: { + "index.number_of_shards": 5, + }, + aliases: { + my_search_indices: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc b/docs/doc_examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc new file mode 100644 index 000000000..8e0dd2359 --- /dev/null +++ b/docs/doc_examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_connector/_sync_job", + body: { + id: "my-connector-id", + job_type: "full", + }, +}); +console.log(response); +---- diff --git 
a/docs/doc_examples/f329242d7c8406297eff9bf609870c37.asciidoc b/docs/doc_examples/f329242d7c8406297eff9bf609870c37.asciidoc new file mode 100644 index 000000000..875cd32a3 --- /dev/null +++ b/docs/doc_examples/f329242d7c8406297eff9bf609870c37.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "music", + pretty: "true", + suggest: { + "song-suggest": { + prefix: "nor", + completion: { + field: "suggest", + fuzzy: { + fuzziness: 2, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc b/docs/doc_examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc index d6baad2f7..bffd0b207 100644 --- a/docs/doc_examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc +++ b/docs/doc_examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc @@ -4,16 +4,13 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - query_string: { - default_field: 'title', - query: 'ny city', - auto_generate_synonyms_phrase_query: false - } - } - } -}) -console.log(response) + query: { + query_string: { + default_field: "title", + query: "ny city", + auto_generate_synonyms_phrase_query: false, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/f342465c65ba76383dedbb334b57b616.asciidoc b/docs/doc_examples/f342465c65ba76383dedbb334b57b616.asciidoc new file mode 100644 index 000000000..4a8ae5885 --- /dev/null +++ b/docs/doc_examples/f342465c65ba76383dedbb334b57b616.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + text: { + type: "text", + index_options: "offsets", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 1, + document: { + text: "Quick brown fox", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match: { + text: "brown fox", + }, + }, + highlight: { + fields: { + text: {}, + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/f34c02351662481dd61a5c2a3e206c60.asciidoc b/docs/doc_examples/f34c02351662481dd61a5c2a3e206c60.asciidoc new file mode 100644 index 000000000..ace60f481 --- /dev/null +++ b/docs/doc_examples/f34c02351662481dd61a5c2a3e206c60.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: [ + { + type: "hyphenation_decompounder", + hyphenation_patterns_path: "analysis/hyphenation_patterns.xml", + word_list: ["Kaffee", "zucker", "tasse"], + }, + ], + text: "Kaffeetasse", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc b/docs/doc_examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc new file mode 100644 index 000000000..f78c80064 --- /dev/null +++ b/docs/doc_examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.shrink({ + index: "my_source_index", + target: "my_target_index", + settings: { + "index.routing.allocation.require._name": null, + "index.blocks.write": null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3697682a886ab129530f3e5c1b30632.asciidoc b/docs/doc_examples/f3697682a886ab129530f3e5c1b30632.asciidoc new file mode 100644 index 000000000..dfdaa28a7 --- /dev/null +++ b/docs/doc_examples/f3697682a886ab129530f3e5c1b30632.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termvectors({ + index: "my-index-000001", + id: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc b/docs/doc_examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc new file mode 100644 index 000000000..d63f990e3 --- /dev/null +++ b/docs/doc_examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.get({ + index: "my-new-index-000001", + id: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc b/docs/doc_examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc new file mode 100644 index 000000000..ce1095fd2 --- /dev/null +++ b/docs/doc_examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.invalidateApiKey({ + name: "my-api-key", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3942d9b34138dfca79dff707af270b7.asciidoc b/docs/doc_examples/f3942d9b34138dfca79dff707af270b7.asciidoc new file mode 100644 index 000000000..02d7258d2 --- /dev/null +++ b/docs/doc_examples/f3942d9b34138dfca79dff707af270b7.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.eql.search({ + index: "my-data-stream", + timestamp_field: "file.accessed", + event_category_field: "file.type", + query: '\n file where (file.size > 1 and file.type == "file")\n ', +}); +console.log(response); +---- diff --git a/docs/doc_examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc b/docs/doc_examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc new file mode 100644 index 000000000..ee8421977 --- /dev/null +++ b/docs/doc_examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.rollupSearch({ + index: "sensor_rollup", + size: 0, + aggregations: { + timeline: { + date_histogram: { + field: "timestamp", + fixed_interval: "7d", + }, + aggs: { + nodes: { + terms: { + field: "node", + }, + aggs: { + max_temperature: { + max: { + field: "temperature", + }, + }, + avg_voltage: { + avg: { + field: "voltage", + }, + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3ab820e1f2f54ea718017aeae865742.asciidoc b/docs/doc_examples/f3ab820e1f2f54ea718017aeae865742.asciidoc new file mode 100644 index 000000000..4e1adad3d 
--- /dev/null +++ b/docs/doc_examples/f3ab820e1f2f54ea718017aeae865742.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "oidc-finance", + roles: ["finance_data"], + enabled: true, + rules: { + all: [ + { + field: { + "realm.name": "oidc1", + }, + }, + { + field: { + groups: "finance-team", + }, + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc b/docs/doc_examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc new file mode 100644 index 000000000..8db77c5b0 --- /dev/null +++ b/docs/doc_examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.validateQuery({ + index: "my-index-000001", + q: "user.id:kimchy", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3b4ddce8ff21fc1a76a7c0d9c36650e.asciidoc b/docs/doc_examples/f3b4ddce8ff21fc1a76a7c0d9c36650e.asciidoc new file mode 100644 index 000000000..5a7ab8f36 --- /dev/null +++ b/docs/doc_examples/f3b4ddce8ff21fc1a76a7c0d9c36650e.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + shrink: { + number_of_shards: 1, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3c696cd63a3f042e62cbb94b75c2427.asciidoc b/docs/doc_examples/f3c696cd63a3f042e62cbb94b75c2427.asciidoc new file mode 100644 index 000000000..a6e06c5e2 --- /dev/null +++ b/docs/doc_examples/f3c696cd63a3f042e62cbb94b75c2427.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.getSettings({ + flat_settings: "true", + filter_path: "persistent.archived*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3e1dfe1c440e3590be26f265e19425d.asciidoc b/docs/doc_examples/f3e1dfe1c440e3590be26f265e19425d.asciidoc new file mode 100644 index 000000000..e9b09015b --- /dev/null +++ b/docs/doc_examples/f3e1dfe1c440e3590be26f265e19425d.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: "1 / (1 + l2norm(params.queryVector, 'my_dense_vector'))", + params: { + queryVector: [4, 3.4, -0.2], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc b/docs/doc_examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc new file mode 100644 index 000000000..5369161be --- /dev/null +++ b/docs/doc_examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.security.clearCachedPrivileges({ + application: "myapp,my-other-app", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3fb52680482925c202c2e2f8af6f044.asciidoc b/docs/doc_examples/f3fb52680482925c202c2e2f8af6f044.asciidoc new file mode 100644 index 000000000..814d8e865 --- /dev/null +++ b/docs/doc_examples/f3fb52680482925c202c2e2f8af6f044.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.count({ + index: "my-index-000001", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f3fe2012557ebbce1ebad4fc997c092d.asciidoc b/docs/doc_examples/f3fe2012557ebbce1ebad4fc997c092d.asciidoc new file mode 100644 index 000000000..4a3c06e4e --- /dev/null +++ b/docs/doc_examples/f3fe2012557ebbce1ebad4fc997c092d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.createRepository({ + name: "my_fs_backup", + repository: { + type: "fs", + settings: { + location: "my_fs_backup_location", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f43d551aaaad73d979adf1b86533e6a3.asciidoc b/docs/doc_examples/f43d551aaaad73d979adf1b86533e6a3.asciidoc new file mode 100644 index 000000000..b1a46928e --- /dev/null +++ b/docs/doc_examples/f43d551aaaad73d979adf1b86533e6a3.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + fixed_interval: "2w", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc b/docs/doc_examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc new file mode 100644 index 000000000..5d7b46190 --- /dev/null +++ b/docs/doc_examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.clearCache({ + index: "my-index-000001,my-index-000002", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f44d287c6937785eb09b91353c1deb1e.asciidoc b/docs/doc_examples/f44d287c6937785eb09b91353c1deb1e.asciidoc new file mode 100644 index 000000000..8e6ddc8cb --- /dev/null +++ b/docs/doc_examples/f44d287c6937785eb09b91353c1deb1e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getDatafeedStats({ + datafeed_id: "datafeed-high_sum_total_sales", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f453e14bcf30853e57618bf12f83e148.asciidoc b/docs/doc_examples/f453e14bcf30853e57618bf12f83e148.asciidoc new file mode 100644 index 000000000..afa69bc17 --- /dev/null +++ b/docs/doc_examples/f453e14bcf30853e57618bf12f83e148.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "pattern_example", + 
settings: { + analysis: { + tokenizer: { + split_on_non_word: { + type: "pattern", + pattern: "\\W+", + }, + }, + analyzer: { + rebuilt_pattern: { + tokenizer: "split_on_non_word", + filter: ["lowercase"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc b/docs/doc_examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc new file mode 100644 index 000000000..5c0f37868 --- /dev/null +++ b/docs/doc_examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + message: "some arrays in this document...", + tags: ["elasticsearch", "wow"], + lists: [ + { + name: "prog_list", + description: "programming list", + }, + { + name: "cool_list", + description: "cool stuff list", + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "my-index-000001", + id: 2, + document: { + message: "no arrays in this document...", + tags: "elasticsearch", + lists: { + name: "prog_list", + description: "programming list", + }, + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "my-index-000001", + query: { + match: { + tags: "elasticsearch", + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/f45990264f8755b96b11c69c12c90ff4.asciidoc b/docs/doc_examples/f45990264f8755b96b11c69c12c90ff4.asciidoc new file mode 100644 index 000000000..0852c2e70 --- /dev/null +++ b/docs/doc_examples/f45990264f8755b96b11c69c12c90ff4.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.exists({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f495f9c99916a05e3b28166d31955fad.asciidoc b/docs/doc_examples/f495f9c99916a05e3b28166d31955fad.asciidoc new file mode 100644 index 000000000..5522bffda --- /dev/null +++ b/docs/doc_examples/f495f9c99916a05e3b28166d31955fad.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + terms: { + field: "genre", + order: { + "playback_stats.max": "desc", + }, + }, + aggs: { + playback_stats: { + stats: { + field: "play_count", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc b/docs/doc_examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc new file mode 100644 index 000000000..e40f14b9d --- /dev/null +++ b/docs/doc_examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-image-index", + mappings: { + properties: { + "image-vector": { + type: "dense_vector", + dims: 3, + index: true, + similarity: "l2_norm", + }, + "file-type": { + type: "keyword", + }, + title: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4a1008b3f9baa67bb03ce9ef5ab4cb4.asciidoc 
b/docs/doc_examples/f4a1008b3f9baa67bb03ce9ef5ab4cb4.asciidoc deleted file mode 100644 index 12bf7d016..000000000 --- a/docs/doc_examples/f4a1008b3f9baa67bb03ce9ef5ab4cb4.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'index_double', - body: { - mappings: { - properties: { - field: { - type: 'date' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f4ae3f3fbf07a7d39122ac5ac20b9c03.asciidoc b/docs/doc_examples/f4ae3f3fbf07a7d39122ac5ac20b9c03.asciidoc new file mode 100644 index 000000000..15cfea68e --- /dev/null +++ b/docs/doc_examples/f4ae3f3fbf07a7d39122ac5ac20b9c03.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "quantized-image-index", + mappings: { + properties: { + "image-vector": { + type: "dense_vector", + element_type: "float", + dims: 2, + index: true, + index_options: { + type: "int8_hnsw", + }, + }, + title: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc b/docs/doc_examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc new file mode 100644 index 000000000..1c73cea43 --- /dev/null +++ b/docs/doc_examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.termsEnum({ + index: "stackoverflow", + field: "tags", + string: "kiba", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4c194628761a4cf2a01453a96bbcc3c.asciidoc b/docs/doc_examples/f4c194628761a4cf2a01453a96bbcc3c.asciidoc new file mode 100644 index 000000000..06a8284a3 --- /dev/null +++ b/docs/doc_examples/f4c194628761a4cf2a01453a96bbcc3c.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "multipolygon", + coordinates: [ + [ + [ + [1002, 200], + [1003, 200], + [1003, 300], + [1002, 300], + [1002, 200], + ], + ], + [ + [ + [1000, 200], + [1001, 100], + [1001, 100], + [1000, 100], + [1000, 100], + ], + [ + [1000.2, 200.2], + [1000.8, 100.2], + [1000.8, 100.8], + [1000.2, 100.8], + [1000.2, 100.2], + ], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc b/docs/doc_examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc new file mode 100644 index 000000000..e41ea896e --- /dev/null +++ b/docs/doc_examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + "cluster.routing.allocation.disk.watermark.low": null, + "cluster.routing.allocation.disk.watermark.low.max_headroom": null, + "cluster.routing.allocation.disk.watermark.high": null, + "cluster.routing.allocation.disk.watermark.high.max_headroom": null, + 
"cluster.routing.allocation.disk.watermark.flood_stage": null, + "cluster.routing.allocation.disk.watermark.flood_stage.max_headroom": null, + "cluster.routing.allocation.disk.watermark.flood_stage.frozen": null, + "cluster.routing.allocation.disk.watermark.flood_stage.frozen.max_headroom": + null, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4f557716049b23f8840d58d71e748f0.asciidoc b/docs/doc_examples/f4f557716049b23f8840d58d71e748f0.asciidoc new file mode 100644 index 000000000..a7503c9a3 --- /dev/null +++ b/docs/doc_examples/f4f557716049b23f8840d58d71e748f0.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putSettings({ + index: "my-index-000001", + settings: { + index: { + refresh_interval: "-1", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc b/docs/doc_examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc new file mode 100644 index 000000000..cd19c6359 --- /dev/null +++ b/docs/doc_examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.forgetFollower({ + index: "", + follower_cluster: "", + follower_index: "", + follower_index_uuid: "", + leader_remote_cluster: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5013174f77868da4dc40cdd745d4ea4.asciidoc b/docs/doc_examples/f5013174f77868da4dc40cdd745d4ea4.asciidoc new file mode 100644 index 000000000..f8876e0f8 --- /dev/null +++ b/docs/doc_examples/f5013174f77868da4dc40cdd745d4ea4.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + aggs: { + genres: { + rare_terms: { + field: "genre", + max_doc_count: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc b/docs/doc_examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc new file mode 100644 index 000000000..b6609f84d --- /dev/null +++ b/docs/doc_examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteAlias({ + index: "my-data-stream", + name: "my-alias", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f545bb95214769aca993c1632a71ad2c.asciidoc b/docs/doc_examples/f545bb95214769aca993c1632a71ad2c.asciidoc new file mode 100644 index 000000000..a54c8c9de --- /dev/null +++ b/docs/doc_examples/f545bb95214769aca993c1632a71ad2c.asciidoc @@ -0,0 +1,59 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "french_example", + settings: { + analysis: { + filter: { + french_elision: { + type: "elision", + articles_case: true, + articles: [ + "l", + "m", + "t", + "qu", + "n", + "s", + "j", + "d", + "c", + "jusqu", + "quoiqu", + "lorsqu", + "puisqu", + ], + }, + french_stop: { + type: "stop", + stopwords: "_french_", + }, + french_keywords: { + type: 
"keyword_marker", + keywords: ["Example"], + }, + french_stemmer: { + type: "stemmer", + language: "light_french", + }, + }, + analyzer: { + rebuilt_french: { + tokenizer: "standard", + filter: [ + "french_elision", + "lowercase", + "french_stop", + "french_keywords", + "french_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f54f6d06163221f2c7aff6e8db942be3.asciidoc b/docs/doc_examples/f54f6d06163221f2c7aff6e8db942be3.asciidoc new file mode 100644 index 000000000..97adcb0dc --- /dev/null +++ b/docs/doc_examples/f54f6d06163221f2c7aff6e8db942be3.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.slm.putLifecycle({ + policy_id: "daily-snapshots", + name: "", + schedule: "0 45 23 * * ?", + repository: "my_repository", + config: { + indices: "*", + include_global_state: true, + }, + retention: { + expire_after: "30d", + min_count: 1, + max_count: 31, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5569945024b9d664828693705c27c1a.asciidoc b/docs/doc_examples/f5569945024b9d664828693705c27c1a.asciidoc deleted file mode 100644 index 5218065d0..000000000 --- a/docs/doc_examples/f5569945024b9d664828693705c27c1a.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'kimchy,elasticsearch', - q: 'user:kimchy' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc b/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc new file mode 100644 index 000000000..1b2520793 --- /dev/null +++ b/docs/doc_examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "completion", + inference_id: "azure_openai_completion", + inference_config: { + service: "azureopenai", + service_settings: { + api_key: "", + resource_name: "", + deployment_id: "", + api_version: "2024-02-01", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5815d573cee0447910c9668003887b8.asciidoc b/docs/doc_examples/f5815d573cee0447910c9668003887b8.asciidoc new file mode 100644 index 000000000..82e48529d --- /dev/null +++ b/docs/doc_examples/f5815d573cee0447910c9668003887b8.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + aggs: { + sales_over_time: { + date_histogram: { + field: "date", + calendar_interval: "2d", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f58969ac405db85f439c5940d014964b.asciidoc b/docs/doc_examples/f58969ac405db85f439c5940d014964b.asciidoc new file mode 100644 index 000000000..3406c8013 --- /dev/null +++ b/docs/doc_examples/f58969ac405db85f439c5940d014964b.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + 
must: { + match_all: {}, + }, + filter: { + geo_bounding_box: { + "pin.location": { + wkt: "BBOX (-74.1, -71.12, 40.73, 40.01)", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc b/docs/doc_examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc new file mode 100644 index 000000000..eb4cc2eaf --- /dev/null +++ b/docs/doc_examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.postStartBasic({ + acknowledge: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5bf2526af19d964f8c4c59d4795cffc.asciidoc b/docs/doc_examples/f5bf2526af19d964f8c4c59d4795cffc.asciidoc new file mode 100644 index 000000000..46fe5e0e8 --- /dev/null +++ b/docs/doc_examples/f5bf2526af19d964f8c4c59d4795cffc.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "imdb", + mappings: { + properties: { + title: { + type: "text", + term_vector: "yes", + }, + description: { + type: "text", + }, + tags: { + type: "text", + fields: { + raw: { + type: "text", + analyzer: "keyword", + term_vector: "yes", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5cbbb60ca26867a5d2da625a68a6e65.asciidoc b/docs/doc_examples/f5cbbb60ca26867a5d2da625a68a6e65.asciidoc new file mode 100644 index 000000000..b9617e400 --- /dev/null +++ b/docs/doc_examples/f5cbbb60ca26867a5d2da625a68a6e65.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "ecommerce-customers", + mappings: { + properties: { + "total_quantity.sum": { + type: "double", + }, + total_quantity: { + type: "object", + }, + taxless_total_price: { + type: "object", + }, + "taxless_total_price.sum": { + type: "double", + }, + "order_id.cardinality": { + type: "long", + }, + customer_id: { + type: "keyword", + }, + "total_quantity.max": { + type: "integer", + }, + order_id: { + type: "object", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5e50fe8a60467adb2c5ee9e0f2d88da.asciidoc b/docs/doc_examples/f5e50fe8a60467adb2c5ee9e0f2d88da.asciidoc new file mode 100644 index 000000000..115cd3757 --- /dev/null +++ b/docs/doc_examples/f5e50fe8a60467adb2c5ee9e0f2d88da.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.clearCursor({ + cursor: + "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5e6378cc41ddf5326fe4084396c59b2.asciidoc b/docs/doc_examples/f5e6378cc41ddf5326fe4084396c59b2.asciidoc new file mode 100644 index 000000000..bc7cd7b1a --- /dev/null +++ b/docs/doc_examples/f5e6378cc41ddf5326fe4084396c59b2.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + default: { + type: "simple", + }, + default_search: { + type: "whitespace", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5eed3f2e3558a238487bc85305b7a71.asciidoc b/docs/doc_examples/f5eed3f2e3558a238487bc85305b7a71.asciidoc new file mode 100644 index 000000000..1f5c5accb --- /dev/null +++ b/docs/doc_examples/f5eed3f2e3558a238487bc85305b7a71.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f5ef80dd92c67059ca353a833e6b7b5e.asciidoc b/docs/doc_examples/f5ef80dd92c67059ca353a833e6b7b5e.asciidoc new file mode 100644 index 000000000..b36c0c85e --- /dev/null +++ b/docs/doc_examples/f5ef80dd92c67059ca353a833e6b7b5e.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + query: { + constant_score: { + filter: { + match: { + type: "hat", + }, + }, + }, + }, + aggs: { + hat_prices: { + sum: { + field: "price", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc b/docs/doc_examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc new file mode 100644 index 000000000..c506a9e44 --- /dev/null +++ b/docs/doc_examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: ".elastic-connectors", + id: "connector-id", + doc: { + custom_scheduling: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f63f6343e74bd5c844854272e746de14.asciidoc b/docs/doc_examples/f63f6343e74bd5c844854272e746de14.asciidoc new file mode 100644 index 000000000..a1a92a95d --- /dev/null +++ b/docs/doc_examples/f63f6343e74bd5c844854272e746de14.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.deactivateWatch({ + watch_id: "my_watch", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f642b64e592131f37209a5100fe161cc.asciidoc b/docs/doc_examples/f642b64e592131f37209a5100fe161cc.asciidoc new file mode 100644 index 000000000..df08d6beb --- /dev/null +++ b/docs/doc_examples/f642b64e592131f37209a5100fe161cc.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + dynamic_templates: [ + { + named_analyzers: { + match_mapping_type: "string", + match: "*", + mapping: { + type: "text", + analyzer: "{name}", + }, + }, + }, + { + no_doc_values: { + match_mapping_type: "*", + mapping: { + type: "{dynamic_type}", + doc_values: false, + }, + }, + }, + ], + }, +}); +console.log(response); + +const response1 = await client.index({ + 
index: "my-index-000001", + id: 1, + document: { + english: "Some English text", + count: 5, + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/f6566395f85d3afe917228643d7318d6.asciidoc b/docs/doc_examples/f6566395f85d3afe917228643d7318d6.asciidoc new file mode 100644 index 000000000..d4a98b60b --- /dev/null +++ b/docs/doc_examples/f6566395f85d3afe917228643d7318d6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.delete({ + index: "my-index-000001", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f656c1e64268293ecc8ebd8065628faa.asciidoc b/docs/doc_examples/f656c1e64268293ecc8ebd8065628faa.asciidoc new file mode 100644 index 000000000..0c9f9c376 --- /dev/null +++ b/docs/doc_examples/f656c1e64268293ecc8ebd8065628faa.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedServiceTokens({ + namespace: "elastic", + service: "fleet-server", + name: "*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f65abb38dd0cfedeb06e0cef206fbdab.asciidoc b/docs/doc_examples/f65abb38dd0cfedeb06e0cef206fbdab.asciidoc new file mode 100644 index 000000000..724fcf566 --- /dev/null +++ b/docs/doc_examples/f65abb38dd0cfedeb06e0cef206fbdab.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["ngram"], + text: "Quick fox", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f66643c54999426c5afa6d5a87435d4e.asciidoc b/docs/doc_examples/f66643c54999426c5afa6d5a87435d4e.asciidoc new file mode 100644 index 000000000..982d55165 --- /dev/null +++ b/docs/doc_examples/f66643c54999426c5afa6d5a87435d4e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearApiKeyCache({ + ids: "yVGMr3QByxdh1MSaicYx", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f679e414de48b8fe25e458844be05618.asciidoc b/docs/doc_examples/f679e414de48b8fe25e458844be05618.asciidoc new file mode 100644 index 000000000..1c0fa96c5 --- /dev/null +++ b/docs/doc_examples/f679e414de48b8fe25e458844be05618.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.put({ + connector_id: "my-connector-id", + name: "Music catalog", + index_name: "music", + service_type: "postgresql", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f67d8aab9106ad24b1d2c771d3840ed1.asciidoc b/docs/doc_examples/f67d8aab9106ad24b1d2c771d3840ed1.asciidoc new file mode 100644 index 000000000..4cf7e3016 --- /dev/null +++ b/docs/doc_examples/f67d8aab9106ad24b1d2c771d3840ed1.asciidoc @@ -0,0 +1,70 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "log_event_watch", + trigger: { + schedule: { + interval: "5m", + 
}, + }, + input: { + search: { + request: { + indices: "log-events", + body: { + size: 0, + query: { + match: { + status: "error", + }, + }, + }, + }, + }, + }, + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 0, + }, + }, + }, + actions: { + email_administrator: { + email: { + to: "sys.admino@host.domain", + subject: "Encountered {{ctx.payload.hits.total}} errors", + body: "Too many errors in the system, see attached data", + attachments: { + attached_data: { + data: { + format: "json", + }, + }, + }, + priority: "high", + }, + }, + notify_pager: { + condition: { + compare: { + "ctx.payload.hits.total": { + gt: 5, + }, + }, + }, + webhook: { + method: "POST", + host: "pager.service.domain", + port: 1234, + path: "/{{watch_id}}", + body: "Encountered {{ctx.payload.hits.total}} errors", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6911b0f2f56523ccbd8027f276981b3.asciidoc b/docs/doc_examples/f6911b0f2f56523ccbd8027f276981b3.asciidoc new file mode 100644 index 000000000..95355dc17 --- /dev/null +++ b/docs/doc_examples/f6911b0f2f56523ccbd8027f276981b3.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + combined_fields: { + query: "database systems", + fields: ["title", "abstract", "body"], + operator: "and", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc b/docs/doc_examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc new file mode 100644 index 000000000..33a796a1f --- /dev/null +++ b/docs/doc_examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.deleteCalendarEvent({ + calendar_id: "planned-outages", + event_id: "LS8LJGEBMTCMA-qz49st", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6b5032bf27c2445d28845be0d413970.asciidoc b/docs/doc_examples/f6b5032bf27c2445d28845be0d413970.asciidoc deleted file mode 100644 index 654779b1a..000000000 --- a/docs/doc_examples/f6b5032bf27c2445d28845be0d413970.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.create({ - index: 'index_long', - body: { - mappings: { - properties: { - field: { - type: 'long' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f6c9d72fa26cbedd0c3f9fa64a88c38a.asciidoc b/docs/doc_examples/f6c9d72fa26cbedd0c3f9fa64a88c38a.asciidoc new file mode 100644 index 000000000..2b2e4c2b3 --- /dev/null +++ b/docs/doc_examples/f6c9d72fa26cbedd0c3f9fa64a88c38a.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + match_all: {}, + }, + _source: "route_length_miles", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6d493650b4344f17297b568016fb445.asciidoc b/docs/doc_examples/f6d493650b4344f17297b568016fb445.asciidoc new file mode 100644 index 000000000..2ca1d4a9c --- /dev/null +++ b/docs/doc_examples/f6d493650b4344f17297b568016fb445.asciidoc @@ -0,0 +1,10 @@ +// This file
is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ccr.unfollow({ + index: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6d6889667f56b8f49d2858070571a6b.asciidoc b/docs/doc_examples/f6d6889667f56b8f49d2858070571a6b.asciidoc deleted file mode 100644 index 19ad9c0a6..000000000 --- a/docs/doc_examples/f6d6889667f56b8f49d2858070571a6b.asciidoc +++ /dev/null @@ -1,27 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test', - alias: 'alias1', - is_write_index: true - } - }, - { - add: { - index: 'test2', - alias: 'alias1' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f6de702c3d097af0b0bd391c4f947233.asciidoc b/docs/doc_examples/f6de702c3d097af0b0bd391c4f947233.asciidoc new file mode 100644 index 000000000..f7a8647ae --- /dev/null +++ b/docs/doc_examples/f6de702c3d097af0b0bd391c4f947233.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.indices({ + v: "true", + s: "rep:desc,pri.store.size:desc", + h: "health,index,pri,rep,store.size,pri.store.size", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc b/docs/doc_examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc new file mode 100644 index 000000000..5cbc31432 --- /dev/null +++ b/docs/doc_examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.clearCachedPrivileges({ + application: "myapp", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6ead39c5505045543b9225deca7367d.asciidoc b/docs/doc_examples/f6ead39c5505045543b9225deca7367d.asciidoc new file mode 100644 index 000000000..9e329ed3e --- /dev/null +++ b/docs/doc_examples/f6ead39c5505045543b9225deca7367d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.postVotingConfigExclusions({ + node_names: "nodeName1,nodeName2", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6edbed2b5b2709bbc13866a4780e27a.asciidoc b/docs/doc_examples/f6edbed2b5b2709bbc13866a4780e27a.asciidoc new file mode 100644 index 000000000..34e9a18d1 --- /dev/null +++ b/docs/doc_examples/f6edbed2b5b2709bbc13866a4780e27a.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + document: { + username: "johnsmith", + name: { + first: "John", + last: "Smith", + }, + }, +}); +console.log(response); + +const response1 = await client.indices.getMapping({ + index: "my-index-000001", +}); +console.log(response1); +---- diff --git a/docs/doc_examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc b/docs/doc_examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc new file mode 100644 index 
000000000..6d21aa440 --- /dev/null +++ b/docs/doc_examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.executeWatch({ + id: "my_watch", + trigger_data: { + triggered_time: "now", + scheduled_time: "now", + }, + alternative_input: { + foo: "bar", + }, + ignore_condition: true, + action_modes: { + "my-action": "force_simulate", + }, + record_execution: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc b/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc new file mode 100644 index 000000000..b7fdbd587 --- /dev/null +++ b/docs/doc_examples/f6f647eb644a2d236637ff05f833cb73.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "POST", + path: "/_connector/_secret", + body: { + value: "encoded_api_key", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc b/docs/doc_examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc index dd8d7e4a8..df0215037 100644 --- a/docs/doc_examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc +++ b/docs/doc_examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc @@ -4,18 +4,15 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - bool: { - filter: { - term: { - status: 'active' - } - } - } - } - } -}) -console.log(response) + query: { + bool: { + filter: { + term: { + status: "active", + }, + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc b/docs/doc_examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc new file mode 100644 index 000000000..4506b25ca --- /dev/null +++ b/docs/doc_examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + index: "my_test_scores", + }, + dest: { + index: "my_test_scores_2", + pipeline: "my_test_scores_pipeline", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7139b3c0e066be832b9100ae17157cc.asciidoc b/docs/doc_examples/f7139b3c0e066be832b9100ae17157cc.asciidoc new file mode 100644 index 000000000..8635b5ce1 --- /dev/null +++ b/docs/doc_examples/f7139b3c0e066be832b9100ae17157cc.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.esql.query({ + format: "txt", + query: + "\n FROM library\n | KEEP author, name, page_count, release_date\n | SORT page_count DESC\n | LIMIT 5\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f733b25cd4c448b226bb76862974eef2.asciidoc b/docs/doc_examples/f733b25cd4c448b226bb76862974eef2.asciidoc new file mode 100644 index 000000000..390ed34ff --- /dev/null +++ b/docs/doc_examples/f733b25cd4c448b226bb76862974eef2.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + 
index: "test", + settings: { + analysis: { + filter: { + code: { + type: "pattern_capture", + preserve_original: true, + patterns: ["(\\p{Ll}+|\\p{Lu}\\p{Ll}+|\\p{Lu}+)", "(\\d+)"], + }, + }, + analyzer: { + code: { + tokenizer: "pattern", + filter: ["code", "lowercase"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f749efe8f11ebd43ef83db91922c736e.asciidoc b/docs/doc_examples/f749efe8f11ebd43ef83db91922c736e.asciidoc new file mode 100644 index 000000000..630fed95c --- /dev/null +++ b/docs/doc_examples/f749efe8f11ebd43ef83db91922c736e.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cluster.putSettings({ + persistent: { + cluster: { + remote: { + clusterB: { + mode: "proxy", + skip_unavailable: "true", + server_name: "clusterb.es.region-b.gcp.elastic-cloud.com", + proxy_socket_connections: "18", + proxy_address: "clusterb.es.region-b.gcp.elastic-cloud.com:9400", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7726cc2c60dea26b88bf0df99fb0813.asciidoc b/docs/doc_examples/f7726cc2c60dea26b88bf0df99fb0813.asciidoc new file mode 100644 index 000000000..355dc8d8a --- /dev/null +++ b/docs/doc_examples/f7726cc2c60dea26b88bf0df99fb0813.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + runtime: { + day_of_week: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc b/docs/doc_examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc new file mode 100644 index 000000000..b6d0265a7 --- /dev/null +++ b/docs/doc_examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.transport.request({ + method: "GET", + path: "/_connector/_sync_job/my-connector-sync-job", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc b/docs/doc_examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc new file mode 100644 index 000000000..a673f8da2 --- /dev/null +++ b/docs/doc_examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "sales", + size: 0, + query: { + constant_score: { + filter: { + range: { + price: { + lte: "500", + }, + }, + }, + }, + }, + aggs: { + prices: { + histogram: { + field: "price", + interval: 50, + hard_bounds: { + min: 100, + max: 200, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7d3d367a3d8e8ff0eca426b6ea85252.asciidoc b/docs/doc_examples/f7d3d367a3d8e8ff0eca426b6ea85252.asciidoc new file mode 100644 index 000000000..1a905fa5f --- /dev/null +++ b/docs/doc_examples/f7d3d367a3d8e8ff0eca426b6ea85252.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + 
source: { + index: "k8s", + }, + dest: { + index: "k9s", + op_type: "create", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7dc2fed08e57abda2c3e8a14f8eb098.asciidoc b/docs/doc_examples/f7dc2fed08e57abda2c3e8a14f8eb098.asciidoc new file mode 100644 index 000000000..2e19d6a11 --- /dev/null +++ b/docs/doc_examples/f7dc2fed08e57abda2c3e8a14f8eb098.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "armenian_example", + settings: { + analysis: { + filter: { + armenian_stop: { + type: "stop", + stopwords: "_armenian_", + }, + armenian_keywords: { + type: "keyword_marker", + keywords: ["օրինակ"], + }, + armenian_stemmer: { + type: "stemmer", + language: "armenian", + }, + }, + analyzer: { + rebuilt_armenian: { + tokenizer: "standard", + filter: [ + "lowercase", + "armenian_stop", + "armenian_keywords", + "armenian_stemmer", + ], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f7ec9062b3a7578fed55f119d7c22b74.asciidoc b/docs/doc_examples/f7ec9062b3a7578fed55f119d7c22b74.asciidoc new file mode 100644 index 000000000..e942d1c0c --- /dev/null +++ b/docs/doc_examples/f7ec9062b3a7578fed55f119d7c22b74.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "standard", + filter: ["lowercase", "asciifolding"], + text: "Is this déja vu?", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc b/docs/doc_examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc new file mode 100644 index 000000000..e5501b1a8 --- /dev/null +++ b/docs/doc_examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.deleteDataStream({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f83eb6605c7c56e297a494b318400ef0.asciidoc b/docs/doc_examples/f83eb6605c7c56e297a494b318400ef0.asciidoc new file mode 100644 index 000000000..4f336dd82 --- /dev/null +++ b/docs/doc_examples/f83eb6605c7c56e297a494b318400ef0.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "shirts", + query: { + bool: { + filter: [ + { + term: { + color: "red", + }, + }, + { + term: { + brand: "gucci", + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f86337e13526c968848cfe29a52d658f.asciidoc b/docs/doc_examples/f86337e13526c968848cfe29a52d658f.asciidoc new file mode 100644 index 000000000..d244f1a1e --- /dev/null +++ b/docs/doc_examples/f86337e13526c968848cfe29a52d658f.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "elser_embeddings_pipeline", + processors: [ + { + inference: { + model_id: "elser_embeddings", + input_output: { + input_field: "content", + output_field: "content_embedding", + }, + 
}, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8651356ce2e7e93fa306c30f57ed588.asciidoc b/docs/doc_examples/f8651356ce2e7e93fa306c30f57ed588.asciidoc new file mode 100644 index 000000000..60451049e --- /dev/null +++ b/docs/doc_examples/f8651356ce2e7e93fa306c30f57ed588.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "custom_truncate_example", + settings: { + analysis: { + analyzer: { + standard_truncate: { + tokenizer: "standard", + filter: ["truncate"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8833488041f3d318435b60917fa877c.asciidoc b/docs/doc_examples/f8833488041f3d318435b60917fa877c.asciidoc new file mode 100644 index 000000000..2c6c95db2 --- /dev/null +++ b/docs/doc_examples/f8833488041f3d318435b60917fa877c.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["my_search_index1", "my_search_index2"], + template: { + script: { + source: { + query: { + query_string: { + query: "{{query_string}}", + default_field: "{{default_field}}", + }, + }, + }, + params: { + query_string: "*", + default_field: "*", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc b/docs/doc_examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc new file mode 100644 index 000000000..156c3a585 --- /dev/null +++ b/docs/doc_examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + bool: { + must: { + term: { + "user.id": "kimchy", + }, + }, + filter: { + term: { + tags: "production", + }, + }, + must_not: { + range: { + age: { + gte: 10, + lte: 20, + }, + }, + }, + should: [ + { + term: { + tags: "env1", + }, + }, + { + term: { + tags: "deployed", + }, + }, + ], + minimum_should_match: 1, + boost: 1, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8cafb1a08bc9b2dd5239f99d4e93f4c.asciidoc b/docs/doc_examples/f8cafb1a08bc9b2dd5239f99d4e93f4c.asciidoc new file mode 100644 index 000000000..85f1392c5 --- /dev/null +++ b/docs/doc_examples/f8cafb1a08bc9b2dd5239f99d4e93f4c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: { + type: "char_group", + tokenize_on_chars: ["whitespace", "-", "\n"], + }, + text: "The QUICK brown-fox", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc b/docs/doc_examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc new file mode 100644 index 000000000..c02fd9b75 --- /dev/null +++ b/docs/doc_examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rollup.getRollupCaps({ + id: "sensor-1", 
+}); +console.log(response); +---- diff --git a/docs/doc_examples/f8cc4b331a19ff4df8e4a490f906ee69.asciidoc b/docs/doc_examples/f8cc4b331a19ff4df8e4a490f906ee69.asciidoc deleted file mode 100644 index 0843c75f3..000000000 --- a/docs/doc_examples/f8cc4b331a19ff4df8e4a490f906ee69.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.cat.health({ - v: true -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f8f960550104c33e00dc78bc8723ccef.asciidoc b/docs/doc_examples/f8f960550104c33e00dc78bc8723ccef.asciidoc new file mode 100644 index 000000000..8d3d91597 --- /dev/null +++ b/docs/doc_examples/f8f960550104c33e00dc78bc8723ccef.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "cooking_blog", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc b/docs/doc_examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc new file mode 100644 index 000000000..f74bd1fbc --- /dev/null +++ b/docs/doc_examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.license.getBasicStatus(); +console.log(response); +---- diff --git a/docs/doc_examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc b/docs/doc_examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc new file mode 100644 index 000000000..90668404a --- /dev/null +++ b/docs/doc_examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.mlTrainedModels({ + h: "c,o,l,ct,v", + v: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f9636d7ef1a45be4f36418c875cf6bef.asciidoc b/docs/doc_examples/f9636d7ef1a45be4f36418c875cf6bef.asciidoc deleted file mode 100644 index 7ecc68bf5..000000000 --- a/docs/doc_examples/f9636d7ef1a45be4f36418c875cf6bef.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.update({ - index: 'sessions', - id: 'dh3sgudg8gsrgl', - body: { - scripted_upsert: true, - script: { - id: 'my_web_session_summariser', - params: { - pageViewEvent: { - url: 'foo.com/bar', - response: 404, - time: '2014-01-01 12:32' - } - } - }, - upsert: {} - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/f96d4614f2fc294339fef325b794355f.asciidoc b/docs/doc_examples/f96d4614f2fc294339fef325b794355f.asciidoc new file mode 100644 index 000000000..7e97b2c84 --- /dev/null +++ b/docs/doc_examples/f96d4614f2fc294339fef325b794355f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getBuckets({ + job_id: "low_request_rate", + anomaly_score: 80, + start: 1454530200001, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc 
b/docs/doc_examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc new file mode 100644 index 000000000..5dfedbed7 --- /dev/null +++ b/docs/doc_examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.watcher.putWatch({ + id: "test_watch", + trigger: { + schedule: { + hourly: { + minute: [0, 5], + }, + }, + }, + input: { + simple: { + payload: { + send: "yes", + }, + }, + }, + condition: { + always: {}, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f9732ce07960134ea7156e118c2da8a6.asciidoc b/docs/doc_examples/f9732ce07960134ea7156e118c2da8a6.asciidoc new file mode 100644 index 000000000..f7a5e21cc --- /dev/null +++ b/docs/doc_examples/f9732ce07960134ea7156e118c2da8a6.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + settings: { + analysis: { + analyzer: { + my_custom_simple_analyzer: { + tokenizer: "lowercase", + filter: [], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f978088f5117d4addd55c11ee3777312.asciidoc b/docs/doc_examples/f978088f5117d4addd55c11ee3777312.asciidoc new file mode 100644 index 000000000..43ef49020 --- /dev/null +++ b/docs/doc_examples/f978088f5117d4addd55c11ee3777312.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.createServiceToken({ + namespace: "elastic", + service: "fleet-server", + name: "token1", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f97aa2efabbf11a534073041eb2658c9.asciidoc b/docs/doc_examples/f97aa2efabbf11a534073041eb2658c9.asciidoc new file mode 100644 index 000000000..a46773eb2 --- /dev/null +++ b/docs/doc_examples/f97aa2efabbf11a534073041eb2658c9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.deleteScript({ + id: "my-stored-script", +}); +console.log(response); +---- diff --git a/docs/doc_examples/f98687271e1bec031cc34d05d8f4b60b.asciidoc b/docs/doc_examples/f98687271e1bec031cc34d05d8f4b60b.asciidoc new file mode 100644 index 000000000..50dcca406 --- /dev/null +++ b/docs/doc_examples/f98687271e1bec031cc34d05d8f4b60b.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + span_multi: { + match: { + prefix: { + "user.id": { + value: "ki", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f994498dd6576be657dedce2822d2b9e.asciidoc b/docs/doc_examples/f994498dd6576be657dedce2822d2b9e.asciidoc new file mode 100644 index 000000000..21a737450 --- /dev/null +++ b/docs/doc_examples/f994498dd6576be657dedce2822d2b9e.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "semantic-embeddings", + retriever: { + rrf: { + 
retrievers: [ + { + standard: { + query: { + match: { + content: "How to avoid muscle soreness while running?", + }, + }, + }, + }, + { + standard: { + query: { + semantic: { + field: "semantic_text", + query: "How to avoid muscle soreness while running?", + }, + }, + }, + }, + ], + }, + }, + highlight: { + fields: { + semantic_text: { + number_of_fragments: 2, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f9a315ea99bed0cf2f36be1d74eb3e4a.asciidoc b/docs/doc_examples/f9a315ea99bed0cf2f36be1d74eb3e4a.asciidoc new file mode 100644 index 000000000..4ffc48a55 --- /dev/null +++ b/docs/doc_examples/f9a315ea99bed0cf2f36be1d74eb3e4a.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: + "MULTIPOLYGON (((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0, 102.0 2.0)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc b/docs/doc_examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc new file mode 100644 index 000000000..2045d9c7d --- /dev/null +++ b/docs/doc_examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + text: { + type: "text", + store: true, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + text: [ + "the quick brown fox", + "the quick brown fox", + "jumped over the lazy dog", + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/f9c8245cc13770dff052b6759a749efa.asciidoc b/docs/doc_examples/f9c8245cc13770dff052b6759a749efa.asciidoc new file mode 100644 index 000000000..717368649 --- /dev/null +++ b/docs/doc_examples/f9c8245cc13770dff052b6759a749efa.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.getSource({ + index: "my-index-000001", + id: 1, +}); +console.log(response); +---- diff --git a/docs/doc_examples/f9f541ae23a184301913f07e62d1afd3.asciidoc b/docs/doc_examples/f9f541ae23a184301913f07e62d1afd3.asciidoc new file mode 100644 index 000000000..78b1ad12c --- /dev/null +++ b/docs/doc_examples/f9f541ae23a184301913f07e62d1afd3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.sql.query({ + format: "json", + keep_alive: "2d", + wait_for_completion_timeout: "2s", + query: "SELECT * FROM library ORDER BY page_count DESC", + fetch_size: 5, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa0f4485cd48f986b7ae8cbb24e331c4.asciidoc b/docs/doc_examples/fa0f4485cd48f986b7ae8cbb24e331c4.asciidoc deleted file mode 100644 index 2b631649e..000000000 --- a/docs/doc_examples/fa0f4485cd48f986b7ae8cbb24e331c4.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// 
This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.updateAliases({ - body: { - actions: [ - { - add: { - index: 'test', - alias: 'alias2', - search_routing: '1,2', - index_routing: '2' - } - } - ] - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fa2fe60f570bd930d2891778c6efbfe6.asciidoc b/docs/doc_examples/fa2fe60f570bd930d2891778c6efbfe6.asciidoc deleted file mode 100644 index e7e37ece7..000000000 --- a/docs/doc_examples/fa2fe60f570bd930d2891778c6efbfe6.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - match: { - message: 'this is a test' - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc b/docs/doc_examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc new file mode 100644 index 000000000..73cdcbcd1 --- /dev/null +++ b/docs/doc_examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "standard", + text: "Quick Brown Foxes!", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa5dcd1c7fadc473a791daf0d7ceec36.asciidoc b/docs/doc_examples/fa5dcd1c7fadc473a791daf0d7ceec36.asciidoc new file mode 100644 index 000000000..ada16f058 --- /dev/null +++ b/docs/doc_examples/fa5dcd1c7fadc473a791daf0d7ceec36.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "tour", + filter_path: "aggregations", + aggregations: { + path: { + time_series: {}, + aggregations: { + museum_tour: { + geo_line: { + point: { + field: "location", + }, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa61e3481b1f889b3bd4253866bb1c6b.asciidoc b/docs/doc_examples/fa61e3481b1f889b3bd4253866bb1c6b.asciidoc new file mode 100644 index 000000000..77efc1c63 --- /dev/null +++ b/docs/doc_examples/fa61e3481b1f889b3bd4253866bb1c6b.asciidoc @@ -0,0 +1,86 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "correlate_latency", + size: 0, + filter_path: "aggregations", + aggs: { + buckets: { + terms: { + field: "version", + size: 2, + }, + aggs: { + latency_ranges: { + range: { + field: "latency", + ranges: [ + { + to: 0, + }, + { + from: 0, + to: 105, + }, + { + from: 105, + to: 225, + }, + { + from: 225, + to: 445, + }, + { + from: 445, + to: 665, + }, + { + from: 665, + to: 885, + }, + { + from: 885, + to: 1115, + }, + { + from: 1115, + to: 1335, + }, + { + from: 1335, + to: 1555, + }, + { + from: 1555, + to: 1775, + }, + { + from: 1775, + }, + ], + }, + }, + bucket_correlation: { + bucket_correlation: { + buckets_path: "latency_ranges>_count", + function: { + count_correlation: { + indicator: { + expectations: [ + 0, 52.5, 165, 335, 555, 775, 1000, 1225, 1445, 1665, 1775, + ], + doc_count: 200, + }, + }, + }, + }, + }, + }, + }, + }, +}); 
+console.log(response); +---- diff --git a/docs/doc_examples/fa82d86a046d67366cfe9ce65535e433.asciidoc b/docs/doc_examples/fa82d86a046d67366cfe9ce65535e433.asciidoc new file mode 100644 index 000000000..0120aa38c --- /dev/null +++ b/docs/doc_examples/fa82d86a046d67366cfe9ce65535e433.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.graph.explore({ + index: "clicklogs", + vertices: [ + { + field: "product", + include: ["1854873"], + }, + ], + connections: { + vertices: [ + { + field: "query.raw", + exclude: ["midi keyboard", "midi", "synth"], + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc b/docs/doc_examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc index ac1a67fd2..b7e200d3f 100644 --- a/docs/doc_examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc +++ b/docs/doc_examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc @@ -4,21 +4,18 @@ [source, js] ---- const response = await client.search({ - body: { - query: { - bool: { - must: { - match_all: {} + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + term: { + status: "active", }, - filter: { - term: { - status: 'active' - } - } - } - } - } -}) -console.log(response) + }, + }, + }, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/fa946228e946da256d40264c8b070a1a.asciidoc b/docs/doc_examples/fa946228e946da256d40264c8b070a1a.asciidoc new file mode 100644 index 000000000..0cce8f2e4 --- /dev/null +++ b/docs/doc_examples/fa946228e946da256d40264c8b070a1a.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + aggs: { + "my-agg-name": { + terms: { + field: "my-field", + }, + meta: { + "my-metadata-field": "foo", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fa9a3ef94470f3d9bd6500b65bf993d1.asciidoc b/docs/doc_examples/fa9a3ef94470f3d9bd6500b65bf993d1.asciidoc new file mode 100644 index 000000000..7dc54746d --- /dev/null +++ b/docs/doc_examples/fa9a3ef94470f3d9bd6500b65bf993d1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + index: "multiplexer_example", + analyzer: "my_analyzer", + text: "Going HOME", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fab4b811ba968aa4df92fb1ac059ea31.asciidoc b/docs/doc_examples/fab4b811ba968aa4df92fb1ac059ea31.asciidoc new file mode 100644 index 000000000..a84fa74d6 --- /dev/null +++ b/docs/doc_examples/fab4b811ba968aa4df92fb1ac059ea31.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "example", + mappings: { + properties: { + location: { + type: "geo_shape", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fab702851e90e945c1b62dec0bb6a205.asciidoc b/docs/doc_examples/fab702851e90e945c1b62dec0bb6a205.asciidoc new file mode 100644 index 000000000..cf74a61dd --- /dev/null +++ b/docs/doc_examples/fab702851e90e945c1b62dec0bb6a205.asciidoc @@ -0,0 +1,10 @@ +// 
This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.deleteBehavioralAnalytics({ + name: "my_analytics_collection", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fabe14480624a99e8ee42c7338672058.asciidoc b/docs/doc_examples/fabe14480624a99e8ee42c7338672058.asciidoc index 02ddabcee..36202bf45 100644 --- a/docs/doc_examples/fabe14480624a99e8ee42c7338672058.asciidoc +++ b/docs/doc_examples/fabe14480624a99e8ee42c7338672058.asciidoc @@ -4,9 +4,8 @@ [source, js] ---- const response = await client.indices.create({ - index: 'test', - wait_for_active_shards: '2' -}) -console.log(response) + index: "test", + wait_for_active_shards: 2, +}); +console.log(response); ---- - diff --git a/docs/doc_examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc b/docs/doc_examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc new file mode 100644 index 000000000..b6fb34952 --- /dev/null +++ b/docs/doc_examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.getDataFrameAnalyticsStats({ + id: "weblog-outliers", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fad524db23eb5718ff310956e590b00d.asciidoc b/docs/doc_examples/fad524db23eb5718ff310956e590b00d.asciidoc new file mode 100644 index 000000000..73022d70d --- /dev/null +++ b/docs/doc_examples/fad524db23eb5718ff310956e590b00d.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + function_score: { + random_score: { + seed: 10, + field: "_seq_no", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc b/docs/doc_examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc new file mode 100644 index 000000000..c6e1be29f --- /dev/null +++ b/docs/doc_examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.rankEval({ + index: "my-index-000001", + requests: [ + { + id: "JFK query", + request: { + query: { + match_all: {}, + }, + }, + ratings: [], + }, + ], + metric: { + precision: { + k: 20, + relevant_rating_threshold: 1, + ignore_unlabeled: false, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb0152f6c70f647a8b6709969113486d.asciidoc b/docs/doc_examples/fb0152f6c70f647a8b6709969113486d.asciidoc new file mode 100644 index 000000000..cd8a962e0 --- /dev/null +++ b/docs/doc_examples/fb0152f6c70f647a8b6709969113486d.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + kwd: { + type: "keyword", + store: true, + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + kwd: ["foo", "foo", "bar", "baz"], + }, +}); 
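+// Reading note (assumption based on synthetic _source behavior for stored +// fields): because kwd sets store: true, synthetic _source should rebuild the +// array from the stored values, preserving the original order and the +// duplicate "foo" entries rather than sorting and de-duplicating them.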
+console.log(response1); +---- diff --git a/docs/doc_examples/fb1180992b2087dfb36576b44c4261e4.asciidoc b/docs/doc_examples/fb1180992b2087dfb36576b44c4261e4.asciidoc new file mode 100644 index 000000000..edb1ad6c8 --- /dev/null +++ b/docs/doc_examples/fb1180992b2087dfb36576b44c4261e4.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putMapping({ + index: "my-data-stream", + write_index_only: "true", + properties: { + host: { + properties: { + ip: { + type: "ip", + ignore_malformed: true, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb1263cfdcbb6a89b20b57004d7e0dfc.asciidoc b/docs/doc_examples/fb1263cfdcbb6a89b20b57004d7e0dfc.asciidoc new file mode 100644 index 000000000..db4b69406 --- /dev/null +++ b/docs/doc_examples/fb1263cfdcbb6a89b20b57004d7e0dfc.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + set: { + field: "my_field", + value: "{{{input_field.1}}}", + }, + }, + ], + }, + docs: [ + { + _index: "index", + _id: "id", + _source: { + input_field: ["Ubuntu", "Windows", "Ventura"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb2b91206cfa8b86b4c7117ac1b5193b.asciidoc b/docs/doc_examples/fb2b91206cfa8b86b4c7117ac1b5193b.asciidoc new file mode 100644 index 000000000..e3465e741 --- /dev/null +++ b/docs/doc_examples/fb2b91206cfa8b86b4c7117ac1b5193b.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "user_hits", + size: 0, + aggs: { + users_per_day: { + date_histogram: { + field: "timestamp", + calendar_interval: "day", + }, + aggs: { + distinct_users: { + cardinality: { + field: "user_id", + }, + }, + total_new_users: { + cumulative_cardinality: { + buckets_path: "distinct_users", + }, + }, + incremental_new_users: { + derivative: { + buckets_path: "total_new_users", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb3505d976283fb7c7b9705a761e0dc2.asciidoc b/docs/doc_examples/fb3505d976283fb7c7b9705a761e0dc2.asciidoc new file mode 100644 index 000000000..5841a4fe8 --- /dev/null +++ b/docs/doc_examples/fb3505d976283fb7c7b9705a761e0dc2.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "polygon", + orientation: "clockwise", + coordinates: [ + [ + [1000, 1000], + [1000, 1001], + [1001, 1001], + [1001, 1000], + [1000, 1000], + ], + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc b/docs/doc_examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc new file mode 100644 index 000000000..f659ab628 --- /dev/null +++ b/docs/doc_examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await 
client.autoscaling.getAutoscalingPolicy({ + name: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc b/docs/doc_examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc new file mode 100644 index 000000000..1405a070d --- /dev/null +++ b/docs/doc_examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateApiKeyId({ + connector_id: "my_connector_id", + api_key_id: "API key_id", + api_key_secret_id: "secret_id", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fb955375a202f66133af009c04cb77ad.asciidoc b/docs/doc_examples/fb955375a202f66133af009c04cb77ad.asciidoc new file mode 100644 index 000000000..5e3b5b376 --- /dev/null +++ b/docs/doc_examples/fb955375a202f66133af009c04cb77ad.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "networks", + mappings: { + properties: { + range: { + type: "ip_range", + }, + name: { + type: "keyword", + }, + department: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fbb38243221c8fb311660616e3add9ce.asciidoc b/docs/doc_examples/fbb38243221c8fb311660616e3add9ce.asciidoc new file mode 100644 index 000000000..543a8d989 --- /dev/null +++ b/docs/doc_examples/fbb38243221c8fb311660616e3add9ce.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + sort: [ + { + _geo_distance: { + "pin.location": [-70, 40], + order: "asc", + unit: "km", + mode: "min", + distance_type: "arc", + ignore_unmapped: true, + }, + }, + ], + query: { + term: { + user: "kimchy", + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fbc5ab85b908480bf944b55da0a43488.asciidoc b/docs/doc_examples/fbc5ab85b908480bf944b55da0a43488.asciidoc new file mode 100644 index 000000000..2f9356831 --- /dev/null +++ b/docs/doc_examples/fbc5ab85b908480bf944b55da0a43488.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + query: { + prefix: { + "user.id": { + value: "ki", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fbcf5078a6a9e09790553804054c36b3.asciidoc b/docs/doc_examples/fbcf5078a6a9e09790553804054c36b3.asciidoc deleted file mode 100644 index 37c29d110..000000000 --- a/docs/doc_examples/fbcf5078a6a9e09790553804054c36b3.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.get({ - index: 'twitter', - id: '0' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fbdad6620eb645f5f1f02e3673604d01.asciidoc b/docs/doc_examples/fbdad6620eb645f5f1f02e3673604d01.asciidoc new file mode 100644 index 000000000..62822d80b --- /dev/null +++ b/docs/doc_examples/fbdad6620eb645f5f1f02e3673604d01.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node

scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my_locations", + query: { + bool: { + must: { + match_all: {}, + }, + filter: { + geo_distance: { + distance: "12km", + "pin.location": "drm3btev3e86", + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc1907515f6a913884a9f86451e90ee8.asciidoc b/docs/doc_examples/fc1907515f6a913884a9f86451e90ee8.asciidoc new file mode 100644 index 000000000..cd09758c7 --- /dev/null +++ b/docs/doc_examples/fc1907515f6a913884a9f86451e90ee8.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index", + mappings: { + _source: { + excludes: ["content_embedding"], + }, + properties: { + content_embedding: { + type: "sparse_vector", + }, + content: { + type: "text", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc190fbbf71949331266dcb3f46a1198.asciidoc b/docs/doc_examples/fc190fbbf71949331266dcb3f46a1198.asciidoc new file mode 100644 index 000000000..679b55598 --- /dev/null +++ b/docs/doc_examples/fc190fbbf71949331266dcb3f46a1198.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.dataStreamsStats({ + name: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc26f51bb22c0b5270a66b4722f18aa7.asciidoc b/docs/doc_examples/fc26f51bb22c0b5270a66b4722f18aa7.asciidoc new file mode 100644 index 000000000..990b731c8 --- /dev/null +++ b/docs/doc_examples/fc26f51bb22c0b5270a66b4722f18aa7.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + allocate: { + number_of_replicas: 2, + total_shards_per_node: 200, + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc3f5f40fa283559ca615cd0eb0a1755.asciidoc b/docs/doc_examples/fc3f5f40fa283559ca615cd0eb0a1755.asciidoc new file mode 100644 index 000000000..eede615f7 --- /dev/null +++ b/docs/doc_examples/fc3f5f40fa283559ca615cd0eb0a1755.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my_index", + mappings: { + properties: { + my_histogram: { + type: "histogram", + }, + my_text: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc49437ce2e7916facf58128308c2aa3.asciidoc b/docs/doc_examples/fc49437ce2e7916facf58128308c2aa3.asciidoc new file mode 100644 index 000000000..fed89c0fb --- /dev/null +++ b/docs/doc_examples/fc49437ce2e7916facf58128308c2aa3.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchableSnapshots.mount({ + repository: "my_repository", + snapshot: "my_snapshot", + wait_for_completion: "true", + index: "my_docs", + renamed_index: "docs", + index_settings: 
{ + "index.number_of_replicas": 0, + }, + ignore_index_settings: ["index.refresh_interval"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc51fbc60b0e20aac83300a43ad90252.asciidoc b/docs/doc_examples/fc51fbc60b0e20aac83300a43ad90252.asciidoc new file mode 100644 index 000000000..d6db2d338 --- /dev/null +++ b/docs/doc_examples/fc51fbc60b0e20aac83300a43ad90252.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "example", + document: { + location: { + type: "geometrycollection", + geometries: [ + { + type: "point", + coordinates: [1000, 100], + }, + { + type: "linestring", + coordinates: [ + [1001, 100], + [1002, 100], + ], + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc5a81f34d416e4b45ca8a859dd3b8f1.asciidoc b/docs/doc_examples/fc5a81f34d416e4b45ca8a859dd3b8f1.asciidoc new file mode 100644 index 000000000..1498d7d30 --- /dev/null +++ b/docs/doc_examples/fc5a81f34d416e4b45ca8a859dd3b8f1.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + size: 0, + aggs: { + by_day: { + auto_date_histogram: { + field: "date", + buckets: 3, + time_zone: "-01:00", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc75ea748e5f49b8ab292e453ab641a6.asciidoc b/docs/doc_examples/fc75ea748e5f49b8ab292e453ab641a6.asciidoc new file mode 100644 index 000000000..76340b434 --- /dev/null +++ b/docs/doc_examples/fc75ea748e5f49b8ab292e453ab641a6.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "products", + size: 0, + query: { + match: { + name: "led tv", + }, + }, + aggs: { + resellers: { + nested: { + path: "resellers", + }, + aggs: { + min_price: { + min: { + field: "resellers.price", + }, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc8a426f8a5112e61e2acb913982a8d9.asciidoc b/docs/doc_examples/fc8a426f8a5112e61e2acb913982a8d9.asciidoc new file mode 100644 index 000000000..0342ca2e6 --- /dev/null +++ b/docs/doc_examples/fc8a426f8a5112e61e2acb913982a8d9.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "events", + size: 10, + sort: [ + { + timestamp: "desc", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fc9a1b1173690a911725cff3912e9755.asciidoc b/docs/doc_examples/fc9a1b1173690a911725cff3912e9755.asciidoc new file mode 100644 index 000000000..e97eff174 --- /dev/null +++ b/docs/doc_examples/fc9a1b1173690a911725cff3912e9755.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ilm.putLifecycle({ + name: "my_policy", + policy: { + phases: { + warm: { + actions: { + readonly: {}, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fccbddfba9f975de7e321732874dfb78.asciidoc 
b/docs/doc_examples/fccbddfba9f975de7e321732874dfb78.asciidoc new file mode 100644 index 000000000..48de444fe --- /dev/null +++ b/docs/doc_examples/fccbddfba9f975de7e321732874dfb78.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.dataStreamsStats({ + name: "my-data-stream*", + human: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fce5c03a388c893cb11a6696e068543f.asciidoc b/docs/doc_examples/fce5c03a388c893cb11a6696e068543f.asciidoc new file mode 100644 index 000000000..eede0ecb8 --- /dev/null +++ b/docs/doc_examples/fce5c03a388c893cb11a6696e068543f.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.hasPrivilegesUserProfile({ + uids: [ + "u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0", + "u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1", + "u_does-not-exist_0", + ], + privileges: { + cluster: ["monitor", "create_snapshot", "manage_ml"], + index: [ + { + names: ["suppliers", "products"], + privileges: ["create_doc"], + }, + { + names: ["inventory"], + privileges: ["read", "write"], + }, + ], + application: [ + { + application: "inventory_manager", + privileges: ["read", "data:write/inventory"], + resources: ["product/1852563"], + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fce7a35a737fc9e54ac1225e310dd561.asciidoc b/docs/doc_examples/fce7a35a737fc9e54ac1225e310dd561.asciidoc new file mode 100644 index 000000000..0a773ef35 --- /dev/null +++ b/docs/doc_examples/fce7a35a737fc9e54ac1225e310dd561.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: + "\n double value = dotProduct(params.query_vector, 'my_dense_vector');\n return sigmoid(1, Math.E, -value); \n ", + params: { + query_vector: [4, 3.4, -0.2], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd04289c54493e19c1d3ac70d0b489c4.asciidoc b/docs/doc_examples/fd04289c54493e19c1d3ac70d0b489c4.asciidoc new file mode 100644 index 000000000..7805fd227 --- /dev/null +++ b/docs/doc_examples/fd04289c54493e19c1d3ac70d0b489c4.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "my-pipeline", + processors: [ + { + drop: { + description: "Drop documents that don't contain 'prod' tag", + if: "\n Collection tags = ctx.tags;\n if(tags != null){\n for (String tag : tags) {\n if (tag.toLowerCase().contains('prod')) {\n return false;\n }\n }\n }\n return true;\n ", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd0cd8ecd03468726b59a605eea06d75.asciidoc b/docs/doc_examples/fd0cd8ecd03468726b59a605eea06d75.asciidoc new file mode 100644 index 000000000..2c97c321d --- /dev/null +++ b/docs/doc_examples/fd0cd8ecd03468726b59a605eea06d75.asciidoc @@ -0,0 +1,40 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node 
scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "test", + query: { + bool: { + must: [ + { + match: { + content: "2016", + }, + }, + ], + should: [ + { + rank_feature: { + field: "pagerank", + }, + }, + { + rank_feature: { + field: "url_length", + boost: 0.1, + }, + }, + { + rank_feature: { + field: "topics.sports", + boost: 0.4, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd26bfdbe95b2d2db374385d12849f77.asciidoc b/docs/doc_examples/fd26bfdbe95b2d2db374385d12849f77.asciidoc new file mode 100644 index 000000000..7209a78f7 --- /dev/null +++ b/docs/doc_examples/fd26bfdbe95b2d2db374385d12849f77.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "trim_example", + settings: { + analysis: { + analyzer: { + keyword_trim: { + tokenizer: "keyword", + filter: ["trim"], + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc b/docs/doc_examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc new file mode 100644 index 000000000..ec8571bec --- /dev/null +++ b/docs/doc_examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getTemplate(); +console.log(response); +---- diff --git a/docs/doc_examples/fd352b472d44d197022a46fce90b6ecb.asciidoc b/docs/doc_examples/fd352b472d44d197022a46fce90b6ecb.asciidoc new file mode 100644 index 000000000..fcd562fa2 --- /dev/null +++ b/docs/doc_examples/fd352b472d44d197022a46fce90b6ecb.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.mget({ + docs: [ + { + _index: "test", + _id: "1", + _source: false, + }, + { + _index: "test", + _id: "2", + _source: ["field3", "field4"], + }, + { + _index: "test", + _id: "3", + _source: { + include: ["user"], + exclude: ["user.location"], + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd60b4092c6552164862cec287359676.asciidoc b/docs/doc_examples/fd60b4092c6552164862cec287359676.asciidoc new file mode 100644 index 000000000..3418d2f7b --- /dev/null +++ b/docs/doc_examples/fd60b4092c6552164862cec287359676.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.stopDatafeed({ + datafeed_id: "datafeed-low_request_rate", + timeout: "30s", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc b/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc new file mode 100644 index 000000000..5fe2c6fd7 --- /dev/null +++ b/docs/doc_examples/fd620f09dbce62c6f0f603a366623607.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.connector.updateFiltering({ + connector_id: "my-sql-connector", + advanced_snippet: { + value: [ + { + tables: ["users", "orders"], 
+ query: + "SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd6fdc8fa994dd02cf1177077325304f.asciidoc b/docs/doc_examples/fd6fdc8fa994dd02cf1177077325304f.asciidoc new file mode 100644 index 000000000..13c5bfeb8 --- /dev/null +++ b/docs/doc_examples/fd6fdc8fa994dd02cf1177077325304f.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.restore({ + repository: "my_repository", + snapshot: "snapshot-20200617", + feature_states: ["geoip"], + indices: "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", + include_aliases: true, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd738a9af7b5d21da31a7722f03aade8.asciidoc b/docs/doc_examples/fd738a9af7b5d21da31a7722f03aade8.asciidoc new file mode 100644 index 000000000..c7bd58f9e --- /dev/null +++ b/docs/doc_examples/fd738a9af7b5d21da31a7722f03aade8.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.cat.shards({ + v: "true", + h: "index,prirep,shard,store", + s: "prirep,store", + bytes: "gb", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc b/docs/doc_examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc new file mode 100644 index 000000000..b5b6b35b5 --- /dev/null +++ b/docs/doc_examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRole({ + name: "remote-replication", + cluster: ["manage_ccr"], + remote_indices: [ + { + clusters: ["my_remote_cluster"], + names: ["leader-index"], + privileges: ["cross_cluster_replication"], + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc b/docs/doc_examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc new file mode 100644 index 000000000..202324d8c --- /dev/null +++ b/docs/doc_examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + analyzer: "standard", + text: ["this is a test", "the second text"], +}); +console.log(response); +---- diff --git a/docs/doc_examples/fdada036a875d7995d5d7aba9c06361e.asciidoc b/docs/doc_examples/fdada036a875d7995d5d7aba9c06361e.asciidoc new file mode 100644 index 000000000..245c36566 --- /dev/null +++ b/docs/doc_examples/fdada036a875d7995d5d7aba9c06361e.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-2", + mappings: { + properties: { + my_vector: { + type: "dense_vector", + dims: 3, + index: false, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fdc8e090293e78e9a6b283650b682517.asciidoc b/docs/doc_examples/fdc8e090293e78e9a6b283650b682517.asciidoc new file mode 100644 index 
000000000..8fd30d6cf --- /dev/null +++ b/docs/doc_examples/fdc8e090293e78e9a6b283650b682517.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.open({ + index: "my-data-stream", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fdd38f0d248385a444c777e7acd97846.asciidoc b/docs/doc_examples/fdd38f0d248385a444c777e7acd97846.asciidoc deleted file mode 100644 index 4b58cb9e3..000000000 --- a/docs/doc_examples/fdd38f0d248385a444c777e7acd97846.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - body: { - query: { - query_string: { - fields: [ - 'title', - 'content' - ], - query: 'this OR that OR thus', - minimum_should_match: 2 - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fde3463ddf136fdfff1306a60986515e.asciidoc b/docs/doc_examples/fde3463ddf136fdfff1306a60986515e.asciidoc new file mode 100644 index 000000000..1296b55b0 --- /dev/null +++ b/docs/doc_examples/fde3463ddf136fdfff1306a60986515e.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "*", + flat_settings: "true", + filter_path: "**.settings.archived*", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fdf7cfdf1c92d21ee710675596eac6fd.asciidoc b/docs/doc_examples/fdf7cfdf1c92d21ee710675596eac6fd.asciidoc new file mode 100644 index 000000000..869b00ef4 --- /dev/null +++ b/docs/doc_examples/fdf7cfdf1c92d21ee710675596eac6fd.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index", + retriever: { + rrf: { + retrievers: [ + { + standard: { + query: { + match: { + my_text_field: "the query string", + }, + }, + }, + }, + { + knn: { + field: "text_embedding.predicted_value", + k: 10, + num_candidates: 100, + query_vector_builder: { + text_embedding: { + model_id: "sentence-transformers__msmarco-minilm-l-12-v3", + model_text: "the query string", + }, + }, + }, + }, + ], + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc b/docs/doc_examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc new file mode 100644 index 000000000..256eb6384 --- /dev/null +++ b/docs/doc_examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.putIndexTemplate({ + name: "sensor-template", + index_patterns: ["sensor-*"], + data_stream: {}, + template: { + lifecycle: { + downsampling: [ + { + after: "1d", + fixed_interval: "1h", + }, + ], + }, + settings: { + "index.mode": "time_series", + }, + mappings: { + properties: { + node: { + type: "keyword", + time_series_dimension: true, + }, + temperature: { + type: "half_float", + time_series_metric: "gauge", + }, + voltage: { + type: "half_float", + time_series_metric: "gauge", + }, + "@timestamp": { + type: "date", + }, + }, + }, + }, +}); 
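+// Reading note (assumption from the downsampling config above): with +// after: "1d" and fixed_interval: "1h", samples older than one day should be +// rolled up into one summary document per time series per hour, keeping the +// node dimension and summarizing the gauge metrics (temperature, voltage).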
+console.log(response); +---- diff --git a/docs/doc_examples/fe3a927d868cbc530e08e05964d5174a.asciidoc b/docs/doc_examples/fe3a927d868cbc530e08e05964d5174a.asciidoc new file mode 100644 index 000000000..8988080e4 --- /dev/null +++ b/docs/doc_examples/fe3a927d868cbc530e08e05964d5174a.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "my-index-000001", + id: 1, + refresh: "true", + document: { + product: "chocolate", + price: [20, 4], + }, +}); +console.log(response); + +const response1 = await client.search({ + query: { + term: { + product: "chocolate", + }, + }, + sort: [ + { + price: { + order: "asc", + mode: "avg", + }, + }, + ], +}); +console.log(response1); +---- diff --git a/docs/doc_examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc b/docs/doc_examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc new file mode 100644 index 000000000..c879695f1 --- /dev/null +++ b/docs/doc_examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.putRoleMapping({ + name: "jwt_user1", + refresh: "true", + roles: ["jwt_role1"], + rules: { + all: [ + { + field: { + "realm.name": "jwt2", + }, + }, + { + field: { + username: "user2", + }, + }, + ], + }, + enabled: true, + metadata: { + version: 1, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe5763d32955e8b65eb3048e97b1580c.asciidoc b/docs/doc_examples/fe5763d32955e8b65eb3048e97b1580c.asciidoc deleted file mode 100644 index aee2245fc..000000000 --- a/docs/doc_examples/fe5763d32955e8b65eb3048e97b1580c.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.indices.forcemerge({ - index: 'twitter', - max_num_segments: '5' -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fe6429d0d82174aa5acf95e96e237380.asciidoc b/docs/doc_examples/fe6429d0d82174aa5acf95e96e237380.asciidoc new file mode 100644 index 000000000..0e51c7393 --- /dev/null +++ b/docs/doc_examples/fe6429d0d82174aa5acf95e96e237380.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "idx", + settings: { + index: { + mapping: { + source: { + mode: "synthetic", + }, + }, + }, + }, + mappings: { + properties: { + my_range: { + type: "ip_range", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "idx", + id: 1, + document: { + my_range: [ + "10.0.0.0/24", + { + gte: "10.0.0.0", + lte: "10.0.0.255", + }, + ], + }, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc b/docs/doc_examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc new file mode 100644 index 000000000..cd8c15f98 --- /dev/null +++ b/docs/doc_examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "index", + query: { + 
script_score: { + query: { + match: { + body: "elasticsearch", + }, + }, + script: { + source: "_score * saturation(doc['pagerank'].value, 10)", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc b/docs/doc_examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc new file mode 100644 index 000000000..4a08144cf --- /dev/null +++ b/docs/doc_examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + my_histogram: { + type: "histogram", + }, + my_text: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe806011466e7cdc1590da186297edb6.asciidoc b/docs/doc_examples/fe806011466e7cdc1590da186297edb6.asciidoc new file mode 100644 index 000000000..d7d6148cc --- /dev/null +++ b/docs/doc_examples/fe806011466e7cdc1590da186297edb6.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe825c05e13e8163073166572c7ac97d.asciidoc b/docs/doc_examples/fe825c05e13e8163073166572c7ac97d.asciidoc new file mode 100644 index 000000000..47072cba1 --- /dev/null +++ b/docs/doc_examples/fe825c05e13e8163073166572c7ac97d.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "geocells", + id: 1, + pipeline: "geohex2shape", + document: { + geocell: "811fbffffffffff", + }, +}); +console.log(response); + +const response1 = await client.get({ + index: "geocells", + id: 1, +}); +console.log(response1); +---- diff --git a/docs/doc_examples/fe8c3e2632f5057bfbd1898a8fe4d0d2.asciidoc b/docs/doc_examples/fe8c3e2632f5057bfbd1898a8fe4d0d2.asciidoc new file mode 100644 index 000000000..9c1b8804e --- /dev/null +++ b/docs/doc_examples/fe8c3e2632f5057bfbd1898a8fe4d0d2.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.searchApplication.put({ + name: "my_search_application", + search_application: { + indices: ["index1", "index2"], + template: { + script: { + lang: "mustache", + source: + '\n {\n "query": {\n "multi_match": {\n "query": "{{query_string}}",\n "fields": [{{#text_fields}}"{{name}}^{{boost}}",{{/text_fields}}]\n }\n },\n "explain": "{{explain}}",\n "from": "{{from}}",\n "size": "{{size}}"\n }\n ', + params: { + query_string: "*", + text_fields: [ + { + name: "title", + boost: 10, + }, + { + name: "description", + boost: 5, + }, + ], + explain: false, + from: 0, + size: 10, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fe96ca3b2a559d8411aca7ed5f3854bd.asciidoc b/docs/doc_examples/fe96ca3b2a559d8411aca7ed5f3854bd.asciidoc new file mode 100644 index 000000000..a8701212d --- /dev/null +++ b/docs/doc_examples/fe96ca3b2a559d8411aca7ed5f3854bd.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate 
the docs examples + +[source, js] +---- +const response = await client.indices.getSettings({ + index: "my-index-000001", + flat_settings: "true", +}); +console.log(response); +---- diff --git a/docs/doc_examples/febb71d774e0a1fc67454213d7448c53.asciidoc b/docs/doc_examples/febb71d774e0a1fc67454213d7448c53.asciidoc new file mode 100644 index 000000000..791a84783 --- /dev/null +++ b/docs/doc_examples/febb71d774e0a1fc67454213d7448c53.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.update({ + index: "my-index-000001", + id: 1, + script: "ctx._source.remove('new_field')", +}); +console.log(response); +---- diff --git a/docs/doc_examples/fece7c0fe1f7d113aa05ff5346a18aff.asciidoc b/docs/doc_examples/fece7c0fe1f7d113aa05ff5346a18aff.asciidoc new file mode 100644 index 000000000..a820ab4eb --- /dev/null +++ b/docs/doc_examples/fece7c0fe1f7d113aa05ff5346a18aff.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.bulk({ + index: "my-data-stream", + refresh: "true", + operations: [ + { + create: {}, + }, + { + "@timestamp": "2099-03-08T11:04:05.000Z", + user: { + id: "vlb44hny", + }, + message: "Login attempt failed", + }, + { + create: {}, + }, + { + "@timestamp": "2099-03-08T11:06:07.000Z", + user: { + id: "8a4f500d", + }, + message: "Login successful", + }, + { + create: {}, + }, + { + "@timestamp": "2099-03-09T11:07:08.000Z", + user: { + id: "l7gk7f82", + }, + message: "Logout successful", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/feda4b996ea7004f8b2c5f5007fb717b.asciidoc b/docs/doc_examples/feda4b996ea7004f8b2c5f5007fb717b.asciidoc new file mode 100644 index 000000000..550ae710f --- /dev/null +++ b/docs/doc_examples/feda4b996ea7004f8b2c5f5007fb717b.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.putPipeline({ + id: "networks_lookup", + processors: [ + { + enrich: { + description: "Add 'network' data based on 'ip'", + policy_name: "networks-policy", + field: "ip", + target_field: "network", + max_matches: "10", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/feefeb68144002fd1fff57b77b95b85e.asciidoc b/docs/doc_examples/feefeb68144002fd1fff57b77b95b85e.asciidoc deleted file mode 100644 index 6c9393e31..000000000 --- a/docs/doc_examples/feefeb68144002fd1fff57b77b95b85e.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// Use `node scripts/generate-docs-examples.js` to generate the docs examples - -[source, js] ----- -const response = await client.search({ - index: 'bank', - body: { - size: 0, - aggs: { - group_by_state: { - terms: { - field: 'state.keyword' - } - } - } - } -}) -console.log(response) ----- - diff --git a/docs/doc_examples/fef520cbc9b0656e6aac7b3dd3da9984.asciidoc b/docs/doc_examples/fef520cbc9b0656e6aac7b3dd3da9984.asciidoc new file mode 100644 index 000000000..39457fccc --- /dev/null +++ b/docs/doc_examples/fef520cbc9b0656e6aac7b3dd3da9984.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = 
await client.eql.search({ + index: "my-index*", + query: + "\n sample by host\n [any where uptime > 0] by os\n [any where port > 100] by op_sys\n [any where bool == true] by os\n ", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff05842419968a2141bde0371ac2f6f4.asciidoc b/docs/doc_examples/ff05842419968a2141bde0371ac2f6f4.asciidoc new file mode 100644 index 000000000..f64626281 --- /dev/null +++ b/docs/doc_examples/ff05842419968a2141bde0371ac2f6f4.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.renderSearchTemplate({ + source: { + query: { + match: { + "user.group.emails": "{{#join}}emails{{/join}}", + }, + }, + }, + params: { + emails: ["user1@example.com", "user_one@example.com"], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc b/docs/doc_examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc new file mode 100644 index 000000000..c0b5cd35e --- /dev/null +++ b/docs/doc_examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-new-index-000001", + size: 0, + filter_path: "hits.total", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc b/docs/doc_examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc new file mode 100644 index 000000000..1357bcc36 --- /dev/null +++ b/docs/doc_examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "my-index-000001", + mappings: { + properties: { + timestamp: { + type: "date", + }, + temperature: { + type: "long", + }, + voltage: { + type: "double", + }, + node: { + type: "keyword", + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc b/docs/doc_examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc new file mode 100644 index 000000000..08c23aa74 --- /dev/null +++ b/docs/doc_examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ingest.simulate({ + pipeline: { + processors: [ + { + redact: { + field: "message", + patterns: ["%{GITHUB_NAME:GITHUB_NAME}"], + pattern_definitions: { + GITHUB_NAME: "@%{USERNAME}", + }, + }, + }, + ], + }, + docs: [ + { + _source: { + message: "@elastic-data-management the PR is ready for review", + }, + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc b/docs/doc_examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc new file mode 100644 index 000000000..0db142db9 --- /dev/null +++ b/docs/doc_examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.snapshot.deleteRepository({ + name: "my_repository", +}); +console.log(response); +---- diff 
--git a/docs/doc_examples/ff63ae39c34925dbfa54282ec9989124.asciidoc b/docs/doc_examples/ff63ae39c34925dbfa54282ec9989124.asciidoc new file mode 100644 index 000000000..6eec21a63 --- /dev/null +++ b/docs/doc_examples/ff63ae39c34925dbfa54282ec9989124.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.reindex({ + source: { + remote: { + host: "http://otherhost:9200/", + headers: { + Authorization: "ApiKey API_KEY_VALUE", + }, + }, + index: "my-index-000001", + query: { + match: { + test: "data", + }, + }, + }, + dest: { + index: "my-new-index-000001", + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc b/docs/doc_examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc new file mode 100644 index 000000000..89bd3a53c --- /dev/null +++ b/docs/doc_examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.ml.inferTrainedModel({ + model_id: "model2", + docs: [ + { + text_field: "Hi my name is Josh and I live in Berlin", + }, + ], +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff7b81fa96c3b994efa3dee230512291.asciidoc b/docs/doc_examples/ff7b81fa96c3b994efa3dee230512291.asciidoc new file mode 100644 index 000000000..0fdc9278b --- /dev/null +++ b/docs/doc_examples/ff7b81fa96c3b994efa3dee230512291.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.graph.explore({ + index: "clicklogs", + query: { + match: { + "query.raw": "midi", + }, + }, + vertices: [ + { + field: "product", + }, + ], + connections: { + vertices: [ + { + field: "query.raw", + }, + ], + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/ff945f5db7d8a9b0d9f6a2f2fcf849e3.asciidoc b/docs/doc_examples/ff945f5db7d8a9b0d9f6a2f2fcf849e3.asciidoc new file mode 100644 index 000000000..e8bf6e810 --- /dev/null +++ b/docs/doc_examples/ff945f5db7d8a9b0d9f6a2f2fcf849e3.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.index({ + index: "index_1", + id: 1, + document: { + text: "Document in index 1", + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "index_2", + id: 2, + refresh: "true", + document: { + text: "Document in index 2", + }, +}); +console.log(response1); + +const response2 = await client.search({ + index: "index_1,index_2", + query: { + terms: { + _tier: ["data_hot", "data_warm"], + }, + }, +}); +console.log(response2); +---- diff --git a/docs/doc_examples/ffcf80e1094aa2d774f56f6b0bc54827.asciidoc b/docs/doc_examples/ffcf80e1094aa2d774f56f6b0bc54827.asciidoc new file mode 100644 index 000000000..785b77d90 --- /dev/null +++ b/docs/doc_examples/ffcf80e1094aa2d774f56f6b0bc54827.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.analyze({ + tokenizer: "keyword", + filter: ["word_delimiter_graph"], + text: 
"Neil's-Super-Duper-XL500--42+AutoCoder", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc b/docs/doc_examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc new file mode 100644 index 000000000..79abaeb97 --- /dev/null +++ b/docs/doc_examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.security.deleteUser({ + username: "jacknich", +}); +console.log(response); +---- diff --git a/docs/doc_examples/ffda10edaa7ce087703193c3cb95a426.asciidoc b/docs/doc_examples/ffda10edaa7ce087703193c3cb95a426.asciidoc new file mode 100644 index 000000000..0ccb2c77a --- /dev/null +++ b/docs/doc_examples/ffda10edaa7ce087703193c3cb95a426.asciidoc @@ -0,0 +1,108 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.indices.create({ + index: "retrievers_example", + settings: { + number_of_shards: 1, + }, + mappings: { + properties: { + vector: { + type: "dense_vector", + dims: 3, + similarity: "l2_norm", + index: true, + index_options: { + type: "flat", + }, + }, + text: { + type: "text", + }, + year: { + type: "integer", + }, + topic: { + type: "keyword", + }, + timestamp: { + type: "date", + }, + }, + }, +}); +console.log(response); + +const response1 = await client.index({ + index: "retrievers_example", + id: 1, + document: { + vector: [0.23, 0.67, 0.89], + text: "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.", + year: 2024, + topic: ["llm", "ai", "information_retrieval"], + timestamp: "2021-01-01T12:10:30", + }, +}); +console.log(response1); + +const response2 = await client.index({ + index: "retrievers_example", + id: 2, + document: { + vector: [0.12, 0.56, 0.78], + text: "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.", + year: 2023, + topic: ["ai", "medicine"], + timestamp: "2022-01-01T12:10:30", + }, +}); +console.log(response2); + +const response3 = await client.index({ + index: "retrievers_example", + id: 3, + document: { + vector: [0.45, 0.32, 0.91], + text: "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.", + year: 2024, + topic: ["ai", "security"], + timestamp: "2023-01-01T12:10:30", + }, +}); +console.log(response3); + +const response4 = await client.index({ + index: "retrievers_example", + id: 4, + document: { + vector: [0.34, 0.21, 0.98], + text: "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize cybersecurity and enable users of every skill level.", + year: 2023, + topic: ["ai", "elastic", "assistant"], + timestamp: "2024-01-01T12:10:30", + }, +}); +console.log(response4); + +const response5 = await client.index({ + index: "retrievers_example", + id: 5, + document: { + vector: [0.11, 0.65, 0.47], + text: "Learn how to spin up a deployment of our hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.", + year: 2024, + topic: 
["documentation", "observability", "elastic"], + timestamp: "2025-01-01T12:10:30", + }, +}); +console.log(response5); + +const response6 = await client.indices.refresh({ + index: "retrievers_example", +}); +console.log(response6); +---- diff --git a/docs/doc_examples/ffe45a7c70071730c2078cabb8cbdf95.asciidoc b/docs/doc_examples/ffe45a7c70071730c2078cabb8cbdf95.asciidoc new file mode 100644 index 000000000..876bb701d --- /dev/null +++ b/docs/doc_examples/ffe45a7c70071730c2078cabb8cbdf95.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.search({ + index: "my-index-000001", + query: { + script_score: { + query: { + bool: { + filter: { + term: { + status: "published", + }, + }, + }, + }, + script: { + source: + "\n float[] v = doc['my_dense_vector'].vectorValue;\n float vm = doc['my_dense_vector'].magnitude;\n float dotProduct = 0;\n for (int i = 0; i < v.length; i++) {\n dotProduct += v[i] * params.queryVector[i];\n }\n return dotProduct / (vm * (float) params.queryVectorMag);\n ", + params: { + queryVector: [4, 3.4, -0.2], + queryVectorMag: 5.25357, + }, + }, + }, + }, +}); +console.log(response); +---- diff --git a/docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc b/docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc new file mode 100644 index 000000000..339435f69 --- /dev/null +++ b/docs/doc_examples/fff86117c47f974074284644e8a97a99.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// Use `node scripts/generate-docs-examples.js` to generate the docs examples + +[source, js] +---- +const response = await client.inference.put({ + task_type: "text_embedding", + inference_id: "jinaai-embeddings", + inference_config: { + service: "jinaai", + service_settings: { + model_id: "jina-embeddings-v3", + api_key: "", + }, + }, +}); +console.log(response); +---- diff --git a/docs/docset.yml b/docs/docset.yml new file mode 100644 index 000000000..28407d2fa --- /dev/null +++ b/docs/docset.yml @@ -0,0 +1,15 @@ +project: 'Node.js client' +products: + - id: elasticsearch-client +exclude: + - examples/proxy/README.md +cross_links: + - docs-content + - elasticsearch + - elastic-otel-node +toc: + - toc: reference + - toc: release-notes +subs: + stack: "Elastic Stack" + es: "Elasticsearch" diff --git a/docs/examples/asStream.asciidoc b/docs/examples/asStream.asciidoc deleted file mode 100644 index df66744e0..000000000 --- a/docs/examples/asStream.asciidoc +++ /dev/null @@ -1,104 +0,0 @@ -[[as_stream_examples]] -== asStream - -Instead of getting the parsed body back, you will get the raw Node.js stream of -data. - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - const { body: bulkResponse } = await client.bulk({ - refresh: true, - body: [ - // operation to perform - { index: { _index: 'game-of-thrones' } }, - // the document to index - { - character: 'Ned Stark', - quote: 'Winter is coming.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - ] - }) - - if (bulkResponse.errors) { - console.log(bulkResponse) - process.exit(1) - } - - // Let's search! 
- const { body } = await client.search({ - index: 'game-of-thrones', - body: { - query: { - match: { - quote: 'winter' - } - } - } - }, { - asStream: true - }) - - // stream async iteration, available in Node.js ≥ 10 - var payload = '' - body.setEncoding('utf8') - for await (const chunk of body) { - payload += chunk - } - console.log(JSON.parse(payload)) - - // classic stream callback style - var payload = '' - body.setEncoding('utf8') - body.on('data', chunk => { payload += chunk }) - body.on('error', console.log) - body.on('end', () => { - console.log(JSON.parse(payload)) - }) -} - -run().catch(console.log) ----- - -TIP: This can be useful if you need to pipe the {es}'s response to a proxy, or -send it directly to another source. - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) -const fastify = require('fastify')() - -fastify.post('/search/:index', async (req, reply) => { - const { body, statusCode, headers } = await client.search({ - index: req.params.index, - body: req.body - }, { - asStream: true - }) - - reply.code(statusCode).headers(headers) - return body -}) - -fastify.listen(3000) ----- \ No newline at end of file diff --git a/docs/examples/bulk.asciidoc b/docs/examples/bulk.asciidoc deleted file mode 100644 index 0f8b6e206..000000000 --- a/docs/examples/bulk.asciidoc +++ /dev/null @@ -1,92 +0,0 @@ -[[bulk_examples]] -== Bulk - -The `bulk` API makes it possible to perform many index/delete operations in a -single API call. This can greatly increase the indexing speed. - -NOTE: Did you know that we provide an helper for sending bulk request? You can find it {jsclient}/client-helpers.html[here]. - -[source,js] ----- -'use strict' - -require('array.prototype.flatmap').shim() -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'http://localhost:9200/' -}) - -async function run () { - await client.indices.create({ - index: 'tweets', - body: { - mappings: { - properties: { - id: { type: 'integer' }, - text: { type: 'text' }, - user: { type: 'keyword' }, - time: { type: 'date' } - } - } - } - }, { ignore: [400] }) - - const dataset = [{ - id: 1, - text: 'If I fall, don\'t bring me back.', - user: 'jon', - date: new Date() - }, { - id: 2, - text: 'Witer is coming', - user: 'ned', - date: new Date() - }, { - id: 3, - text: 'A Lannister always pays his debts.', - user: 'tyrion', - date: new Date() - }, { - id: 4, - text: 'I am the blood of the dragon.', - user: 'daenerys', - date: new Date() - }, { - id: 5, // change this value to a string to see the bulk response with errors - text: 'A girl is Arya Stark of Winterfell. And I\'m going home.', - user: 'arya', - date: new Date() - }] - - const body = dataset.flatMap(doc => [{ index: { _index: 'tweets' } }, doc]) - - const { body: bulkResponse } = await client.bulk({ refresh: true, body }) - - if (bulkResponse.errors) { - const erroredDocuments = [] - // The items array has the same order of the dataset we just indexed. - // The presence of the `error` key indicates that the operation - // that we did for the document has failed. - bulkResponse.items.forEach((action, i) => { - const operation = Object.keys(action)[0] - if (action[operation].error) { - erroredDocuments.push({ - // If the status is 429 it means that you can retry the document, - // otherwise it's very likely a mapping error, and you should - // fix the document before to try it again. 
- status: action[operation].status, - error: action[operation].error, - operation: body[i * 2], - document: body[i * 2 + 1] - }) - } - }) - console.log(erroredDocuments) - } - - const { body: count } = await client.count({ index: 'tweets' }) - console.log(count) -} - -run().catch(console.log) ----- diff --git a/docs/examples/exists.asciidoc b/docs/examples/exists.asciidoc deleted file mode 100644 index dc8b42871..000000000 --- a/docs/examples/exists.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[[exists_examples]] -== Exists - -Check that the document `/game-of-thrones/1` exists. - -NOTE: Since this API uses the `HEAD` method, the body value will be boolean. - -[source,js] ---------- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - await client.index({ - index: 'game-of-thrones', - id: '1', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.' - } - }) - - const { body } = await client.exists({ - index: 'game-of-thrones', - id: 1 - }) - - console.log(body) // true -} - -run().catch(console.log) ---------- \ No newline at end of file diff --git a/docs/examples/get.asciidoc b/docs/examples/get.asciidoc deleted file mode 100644 index 3fa0fd720..000000000 --- a/docs/examples/get.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[[get_examples]] -== Get - -The get API allows to get a typed JSON document from the index based on its id. -The following example gets a JSON document from an index called -`game-of-thrones`, under a type called `_doc`, with id valued `'1'`. - -[source,js] ---------- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - await client.index({ - index: 'game-of-thrones', - id: '1', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.' - } - }) - - const { body } = await client.get({ - index: 'game-of-thrones', - id: '1' - }) - - console.log(body) -} - -run().catch(console.log) ---------- \ No newline at end of file diff --git a/docs/examples/ignore.asciidoc b/docs/examples/ignore.asciidoc deleted file mode 100644 index 8b8ec6b34..000000000 --- a/docs/examples/ignore.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ -[[ignore_examples]] -== Ignore - -HTTP status codes which should not be considered errors for this request. - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - const { body: bulkResponse } = await client.bulk({ - refresh: true, - body: [ - // operation to perform - { index: { _index: 'game-of-thrones' } }, - // the document to index - { - character: 'Ned Stark', - quote: 'Winter is coming.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - ] - }) - - if (bulkResponse.errors) { - console.log(bulkResponse) - process.exit(1) - } - - // Let's search! 
- const { body } = await client.search({ - index: 'game-of-thrones', - body: { - query: { - match: { - quote: 'fire' - } - } - } - }, { - ignore: [404] - }) - - console.log(body) // ResponseError -} - -run().catch(console.log) ----- \ No newline at end of file diff --git a/docs/examples/index.asciidoc b/docs/examples/index.asciidoc deleted file mode 100644 index fdc60a660..000000000 --- a/docs/examples/index.asciidoc +++ /dev/null @@ -1,36 +0,0 @@ -[[examples]] -= Examples - -Following you can find some examples on how to use the client. - -* Use of the <> parameter; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Executing a <> request; -* Use of the <> parameter; -* Executing a <> request; -* How do I <>? -* Executing a <> request; -* I need <>; -* How to use the <> method; -* How to use <>; - -include::asStream.asciidoc[] -include::bulk.asciidoc[] -include::exists.asciidoc[] -include::get.asciidoc[] -include::ignore.asciidoc[] -include::msearch.asciidoc[] -include::scroll.asciidoc[] -include::search.asciidoc[] -include::suggest.asciidoc[] -include::transport.request.asciidoc[] -include::typescript.asciidoc[] -include::sql.query.asciidoc[] -include::update.asciidoc[] -include::update_by_query.asciidoc[] -include::reindex.asciidoc[] diff --git a/docs/examples/msearch.asciidoc b/docs/examples/msearch.asciidoc deleted file mode 100644 index 49fadd1ea..000000000 --- a/docs/examples/msearch.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -[[msearch_examples]] -== MSearch - -The multi search API allows to execute several search requests within the same -API. - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - const { body: bulkResponse } = await client.bulk({ - refresh: true, - body: [ - { index: { _index: 'game-of-thrones' } }, - { - character: 'Ned Stark', - quote: 'Winter is coming.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' 
-      }
-    ]
-  })
-
-  if (bulkResponse.errors) {
-    console.log(bulkResponse)
-    process.exit(1)
-  }
-
-  const { body } = await client.msearch({
-    body: [
-      { index: 'game-of-thrones' },
-      { query: { match: { character: 'Daenerys' } } },
-
-      { index: 'game-of-thrones' },
-      { query: { match: { character: 'Tyrion' } } }
-    ]
-  })
-
-  console.log(body.responses)
-}
-
-run().catch(console.log)
-----
\ No newline at end of file
diff --git a/docs/examples/proxy/.gitignore b/docs/examples/proxy/.gitignore
new file mode 100644
index 000000000..12536433a
--- /dev/null
+++ b/docs/examples/proxy/.gitignore
@@ -0,0 +1,51 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+
+# Runtime data
+pids
+*.pid
+*.seed
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+
+# coverage output
+coverage.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (http://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules
+jspm_packages
+
+# Optional npm cache directory
+.npm
+
+# Optional REPL history
+.node_repl_history
+
+# mac files
+.DS_Store
+
+# vim swap files
+*.swp
+
+# Jetbrains editor folder
+.idea
+
+.vercel
\ No newline at end of file
diff --git a/docs/examples/proxy/README.md b/docs/examples/proxy/README.md
new file mode 100644
index 000000000..23e835aae
--- /dev/null
+++ b/docs/examples/proxy/README.md
@@ -0,0 +1,65 @@
+# Elasticsearch proxy example
+
+This folder contains an example of how to build a lightweight proxy
+between your frontend code and Elasticsearch if you don't
+have a more sophisticated backend in place yet.
+
+> **IMPORTANT:** This is not production-ready code and it is for demonstration purposes only;
+> we make no guarantees about its security or stability.
+
+This project is designed to be deployed on [Vercel](https://vercel.com/), a cloud platform
+for static sites and Serverless Functions. You can also use other function providers,
+such as [Google Cloud Functions](https://cloud.google.com/functions).
+
+## Project structure
+
+The project comes with four endpoints:
+
+- `/api/search`: runs a search, requires `'read'` permission
+- `/api/autocomplete`: runs an autocomplete suggestion, requires `'read'` permission
+- `/api/index`: indexes or updates a document, requires `'write'` permission
+- `/api/delete`: deletes a document, requires `'write'` permission
+
+Inside `utils/authorize.js` you can find the authorization logic for the endpoints.
+In each endpoint you should configure the `INDEX` variable.
+
+## How to use
+
+Create an account on Vercel, then create a deployment on Elastic Cloud. If you
+don't have an account on Elastic Cloud, you can create one with a free 14-day trial
+of the [Elasticsearch Service](https://www.elastic.co/elasticsearch/service).
+
+### Configure Elasticsearch
+
+Once you have created a deployment on Elastic Cloud, copy the generated Cloud Id and the credentials.
+Then open `utils/prepare-elasticsearch.js` and fill in your credentials. The script generates
+an [Api Key](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html)
+that you can use to authenticate your requests. Based on the configuration of the Api Key, you will be able
+to perform different operations on the specified indices or index pattern.
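+
+As an illustration, here is a minimal sketch of how a frontend could call the proxy once it
+is deployed. It assumes the endpoints are called with a JSON `POST` body; the deployment URL
+and the `apiKey` value are placeholders you must replace with your own:
+
+```js
+// placeholder values: use your own deployment URL and generated Api Key
+const apiKey = '<your base64-encoded Api Key>'
+const response = await fetch('https://your-project.vercel.app/api/search', {
+  method: 'POST',
+  headers: {
+    Authorization: `Bearer ${apiKey}`, // the proxy expects a Bearer token
+    'Content-Type': 'application/json'
+  },
+  body: JSON.stringify({ text: 'javascript' }) // /api/search reads `body.text`
+})
+console.log(await response.json())
+```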
+
+### Configure Vercel
+
+Install the [Vercel CLI](https://vercel.com/docs/cli) to bootstrap the project,
+or read the [quickstart](https://vercel.com/docs) documentation.
+
+If you are using the CLI, bootstrap the project by running `vercel`. Test the project locally
+with `vercel dev`, and deploy it with `vercel deploy`.
+Configure the `ELASTIC_CLOUD_ID` [environment variable](https://vercel.com/docs/environment-variables) as well.
+The Api Key is passed from the frontend app via an `Authorization` header as a `Bearer` token and is
+used to authorize the API calls to the endpoints.
+Additional configuration, such as CORS, can be added to [`vercel.json`](https://vercel.com/docs/configuration).
+
+## Authentication
+
+If you are using Elasticsearch only for search purposes, such as a search box, you can create
+an Api Key with `read` permissions and store it in your frontend app. Then you can send it
+via the `Authorization` header to the proxy and run your searches.
+
+If you need to ingest data as well, it's more secure to have strong authentication in your application.
+For such cases, use an external authentication service, such as [Auth0](https://auth0.com/)
+or [Magic Link](https://magic.link/). Then create a different Api Key with `read` and `write`
+permissions for authenticated users, which will not be stored in the frontend app.
+
+## License
+
+This software is licensed under the [Apache 2 license](../../LICENSE).
diff --git a/docs/examples/proxy/api/autocomplete.js b/docs/examples/proxy/api/autocomplete.js
new file mode 100644
index 000000000..fdd70e11f
--- /dev/null
+++ b/docs/examples/proxy/api/autocomplete.js
@@ -0,0 +1,89 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// IMPORTANT: this is not production-ready code & is purely for demonstration purposes;
+// we make no guarantees about its security and stability
+
+// NOTE: to make this endpoint work, you should create an ApiKey with 'read' permissions
+
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const authorize = require('../utils/authorize')
+
+const INDEX = ''
+const client = new Client({
+  cloud: {
+    id: process.env.ELASTIC_CLOUD_ID
+  }
+})
+
+module.exports = async (req, res) => {
+  const [err, token] = authorize(req)
+  if (err) {
+    res.status(401)
+    res.json(err)
+    return
+  }
+
+  if (typeof req.query.q !== 'string') {
+    res.status(400)
+    res.json({
+      error: 'Bad Request',
+      message: 'Missing parameter "query.q"',
+      statusCode: 400
+    })
+    return
+  }
+
+  if (req.query.q.length < 3) {
+    res.status(400)
+    res.json({
+      error: 'Bad Request',
+      message: 'The length of "query.q" should be at least 3',
+      statusCode: 400
+    })
+    return
+  }
+
+  try {
+    const response = await client.search({
+      index: INDEX,
+      // You could directly send from the browser
+      // Elasticsearch's query DSL, but it would
+      // expose you to the risk that a malicious user
+      // could overload your cluster by crafting
+      // expensive queries.
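+      // Instead, this endpoint accepts only a plain text prefix
+      // (req.query.q) and builds the completion suggester request
+      // server-side.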
+      _source: ['id', 'url', 'name'], // the fields you want to show in the autocompletion
+      size: 0,
+      // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters-completion.html
+      suggest: {
+        suggestions: {
+          prefix: req.query.q,
+          completion: {
+            field: 'suggest',
+            size: 5
+          }
+        }
+      }
+    }, {
+      headers: {
+        Authorization: `ApiKey ${token}`
+      }
+    })
+
+    // It might be useful to configure HTTP cache-control headers
+    // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
+    // res.setHeader('stale-while-revalidate', '30')
+    res.json(response)
+  } catch (err) {
+    res.status(err.statusCode || 500)
+    res.json({
+      error: err.name,
+      message: err.message,
+      statusCode: err.statusCode || 500
+    })
+  }
+}
diff --git a/docs/examples/proxy/api/delete.js b/docs/examples/proxy/api/delete.js
new file mode 100644
index 000000000..66de08635
--- /dev/null
+++ b/docs/examples/proxy/api/delete.js
@@ -0,0 +1,60 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// IMPORTANT: this is not production-ready code & is purely for demonstration purposes;
+// we make no guarantees about its security and stability
+
+// NOTE: to make this endpoint work, you should create an ApiKey with 'write' permissions
+
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const authorize = require('../utils/authorize')
+
+const INDEX = ''
+const client = new Client({
+  cloud: {
+    id: process.env.ELASTIC_CLOUD_ID
+  }
+})
+
+module.exports = async (req, res) => {
+  const [err, token] = authorize(req)
+  if (err) {
+    res.status(401)
+    res.json(err)
+    return
+  }
+
+  if (typeof req.query.id !== 'string' || req.query.id.length === 0) {
+    res.status(400)
+    res.json({
+      error: 'Bad Request',
+      message: 'Missing document id',
+      statusCode: 400
+    })
+    return
+  }
+
+  try {
+    const response = await client.delete({
+      index: INDEX,
+      id: req.query.id
+    }, {
+      headers: {
+        Authorization: `ApiKey ${token}`
+      }
+    })
+
+    res.json(response)
+  } catch (err) {
+    res.status(err.statusCode || 500)
+    res.json({
+      error: err.name,
+      message: err.message,
+      statusCode: err.statusCode || 500
+    })
+  }
+}
diff --git a/docs/examples/proxy/api/index.js b/docs/examples/proxy/api/index.js
new file mode 100644
index 000000000..446ba6757
--- /dev/null
+++ b/docs/examples/proxy/api/index.js
@@ -0,0 +1,63 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// IMPORTANT: this is not production-ready code & is purely for demonstration purposes;
+// we make no guarantees about its security and stability
+
+// NOTE: to make this endpoint work, you should create an ApiKey with 'write' permissions
+
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const authorize = require('../utils/authorize')
+
+const INDEX = ''
+const client = new Client({
+  cloud: {
+    id: process.env.ELASTIC_CLOUD_ID
+  }
+})
+
+module.exports = async (req, res) => {
+  const [err, token] = authorize(req)
+  if (err) {
+    res.status(401)
+    res.json(err)
+    return
+  }
+
+  if (typeof req.body !== 'object') {
+    res.status(400)
+    res.json({
+      error: 'Bad Request',
+      message: 'The document should be an object',
+      statusCode: 400
+    })
+    return
+  }
+
+  try {
+    const response = await client.index({
+      index: INDEX,
+      id: req.query.id,
+      document: req.body
+    }, {
+      headers: {
+        Authorization: `ApiKey ${token}`
+      },
+      meta: true
+    })
+
+    res.status(response.statusCode)
+    res.json(response.body)
+  } catch (err) {
+    res.status(err.statusCode || 500)
+    res.json({
+      error: err.name,
+      message: err.message,
+      statusCode: err.statusCode || 500
+    })
+  }
+}
diff --git a/docs/examples/proxy/api/search.js b/docs/examples/proxy/api/search.js
new file mode 100644
index 000000000..116ef0676
--- /dev/null
+++ b/docs/examples/proxy/api/search.js
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// IMPORTANT: this is not production-ready code & is purely for demonstration purposes;
+// we make no guarantees about its security and stability
+
+// NOTE: to make this endpoint work, you should create an ApiKey with 'read' permissions
+
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const authorize = require('../utils/authorize')
+
+const INDEX = ''
+const client = new Client({
+  cloud: {
+    id: process.env.ELASTIC_CLOUD_ID
+  }
+})
+
+module.exports = async (req, res) => {
+  const [err, token] = authorize(req)
+  if (err) {
+    res.status(401)
+    res.json(err)
+    return
+  }
+
+  if (typeof req.body.text !== 'string') {
+    res.status(400)
+    res.json({
+      error: 'Bad Request',
+      message: 'Missing parameter "body.text"',
+      statusCode: 400
+    })
+    return
+  }
+
+  try {
+    const response = await client.search({
+      index: INDEX,
+      // You could directly send from the browser
+      // Elasticsearch's query DSL, but it would
+      // expose you to the risk that a malicious user
+      // could overload your cluster by crafting
+      // expensive queries.
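+      // Instead, this endpoint accepts only a plain text string
+      // (req.body.text) and builds a fixed match query server-side.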
+      query: {
+        match: { field: req.body.text }
+      }
+    }, {
+      headers: {
+        Authorization: `ApiKey ${token}`
+      }
+    })
+
+    // It might be useful to configure HTTP cache-control headers
+    // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
+    // res.setHeader('stale-while-revalidate', '30')
+    res.json(response)
+  } catch (err) {
+    res.status(err.statusCode || 500)
+    res.json({
+      error: err.name,
+      message: err.message,
+      statusCode: err.statusCode || 500
+    })
+  }
+}
diff --git a/docs/examples/proxy/package.json b/docs/examples/proxy/package.json
new file mode 100644
index 000000000..cdbeea15e
--- /dev/null
+++ b/docs/examples/proxy/package.json
@@ -0,0 +1,19 @@
+{
+  "name": "proxy-example",
+  "version": "1.0.0",
+  "private": true,
+  "description": "",
+  "main": "index.js",
+  "scripts": {
+    "test": "standard"
+  },
+  "keywords": [],
+  "author": "Elastic Client Library Maintainers",
+  "license": "Apache-2.0",
+  "dependencies": {
+    "@elastic/elasticsearch": "^8.0.0"
+  },
+  "devDependencies": {
+    "standard": "^16.0.3"
+  }
+}
diff --git a/docs/examples/proxy/utils/authorize.js b/docs/examples/proxy/utils/authorize.js
new file mode 100644
index 000000000..74370a5ce
--- /dev/null
+++ b/docs/examples/proxy/utils/authorize.js
@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+// IMPORTANT: this is not production-ready code & is purely for demonstration purposes;
+// we make no guarantees about its security and stability
+
+'use strict'
+
+module.exports = (req) => {
+  const auth = req.headers.authorization
+  if (typeof auth !== 'string') {
+    return [{
+      error: 'Unauthorized',
+      message: 'Missing authorization header',
+      statusCode: 401
+    }, null]
+  }
+
+  const [type, token] = req.headers.authorization.split(' ')
+
+  if (type !== 'Bearer') {
+    return [{
+      error: 'Unauthorized',
+      message: 'Bad authorization type',
+      statusCode: 401
+    }, null]
+  }
+
+  if (!token || token.length === 0) {
+    return [{
+      error: 'Unauthorized',
+      message: 'Bad authorization token',
+      statusCode: 401
+    }, null]
+  }
+
+  return [null, token]
+}
diff --git a/docs/examples/proxy/utils/prepare-elasticsearch.js b/docs/examples/proxy/utils/prepare-elasticsearch.js
new file mode 100644
index 000000000..6850aaae4
--- /dev/null
+++ b/docs/examples/proxy/utils/prepare-elasticsearch.js
@@ -0,0 +1,52 @@
+/*
+ * Copyright Elasticsearch B.V.
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const { Client } = require('@elastic/elasticsearch') + +// Your Cloud Id +const cloudId = '' +// Your admin username +const username = '' +// Your admin password +const password = '' +// The indices or index patterns you will need to access +const indexNames = ['my-index-name-or-pattern'] +// see https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-indices +const privileges = ['read'] + +async function generateApiKeys (opts) { + const client = new Client({ + cloud: { + id: cloudId + }, + auth: { + username, + password + } + }) + + const result = await client.security.createApiKey({ + name: 'elasticsearch-proxy', + role_descriptors: { + 'elasticsearch-proxy-users': { + index: [{ + names: indexNames, + privileges + }] + } + } + }) + + return Buffer.from(`${result.id}:${result.api_key}`).toString('base64') +} + +generateApiKeys() + .then(console.log) + .catch(err => { + console.error(err) + process.exit(1) + }) diff --git a/docs/examples/proxy/vercel.json b/docs/examples/proxy/vercel.json new file mode 100644 index 000000000..d1615c120 --- /dev/null +++ b/docs/examples/proxy/vercel.json @@ -0,0 +1,13 @@ +{ + "headers": [ + { + "source": "/api/(.*)", + "headers": [ + { "key": "Access-Control-Allow-Credentials", "value": "true" }, + { "key": "Access-Control-Allow-Origin", "value": "*" }, + { "key": "Access-Control-Allow-Methods", "value": "GET,OPTIONS,PATCH,DELETE,POST,PUT" }, + { "key": "Access-Control-Allow-Headers", "value": "X-CSRF-Token, X-Requested-With, Accept, Accept-Version, Content-Length, Content-MD5, Content-Type, Date, X-Api-Version" } + ] + } + ] +} diff --git a/docs/examples/reindex.asciidoc b/docs/examples/reindex.asciidoc deleted file mode 100644 index c91ac5793..000000000 --- a/docs/examples/reindex.asciidoc +++ /dev/null @@ -1,80 +0,0 @@ -[[reindex_examples]] -== Reindex - -The `reindex` API extracts the document source from the source index and indexes -the documents into the destination index. You can copy all documents to the -destination index, reindex a subset of the documents or update the source before -to reindex it. - -In the following example we have a `game-of-thrones` index which contains -different quotes of various characters, we want to create a new index only for -the house Stark and remove the `house` field from the document source. - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - await client.index({ - index: 'game-of-thrones', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.', - house: 'stark' - } - }) - - await client.index({ - index: 'game-of-thrones', - body: { - character: 'Arya Stark', - quote: 'A girl is Arya Stark of Winterfell. 
And I\'m going home.', - house: 'stark' - } - }) - - await client.index({ - index: 'game-of-thrones', - refresh: true, - body: { - character: 'Tyrion Lannister', - quote: 'A Lannister always pays his debts.', - house: 'lannister' - } - }) - - await client.reindex({ - waitForCompletion: true, - refresh: true, - body: { - source: { - index: 'game-of-thrones', - query: { - match: { character: 'stark' } - } - }, - dest: { - index: 'stark-index' - }, - script: { - lang: 'painless', - source: 'ctx._source.remove("house")' - } - } - }) - - const { body } = await client.search({ - index: 'stark-index', - body: { - query: { match_all: {} } - } - }) - - console.log(body.hits.hits) -} - -run().catch(console.log) ----- diff --git a/docs/examples/scroll.asciidoc b/docs/examples/scroll.asciidoc deleted file mode 100644 index 146400e5b..000000000 --- a/docs/examples/scroll.asciidoc +++ /dev/null @@ -1,193 +0,0 @@ -[[scroll_examples]] -== Scroll - -While a search request returns a single “page” of results, the scroll API can be -used to retrieve large numbers of results (or even all results) from a single -search request, in much the same way as you would use a cursor on a traditional -database. - -Scrolling is not intended for real time user requests, but rather for processing -large amounts of data, for example in order to reindex the contents of one index -into a new index with a different configuration. - -NOTE: The results that are returned from a scroll request reflect the state of -the index at the time that the initial search request was made, like a snapshot -in time. Subsequent changes to documents (index, update or delete) will only -affect later search requests. - -In order to use scrolling, the initial search request should specify the scroll -parameter in the query string, which tells {es} how long it should keep the -“search context” alive. - -NOTE: Did you know that we provide an helper for sending scroll requests? You can find it {jsclient}/client-helpers.html[here]. - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - const allQuotes = [] - const responseQueue = [] - - // Let's index some data! - const { body: bulkResponse } = await client.bulk({ - // here we are forcing an index refresh, - // otherwise we will not get any result - // in the consequent search - refresh: true, - body: [ - // operation to perform - { index: { _index: 'game-of-thrones' } }, - // the document to index - { - character: 'Ned Stark', - quote: 'Winter is coming.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' 
- } - ] - }) - - if (bulkResponse.errors) { - console.log(bulkResponse) - process.exit(1) - } - - // start things off by searching, setting a scroll timeout, and pushing - // our first response into the queue to be processed - const response = await client.search({ - index: 'game-of-thrones', - // keep the search results "scrollable" for 30 seconds - scroll: '30s', - // for the sake of this example, we will get only one result per search - size: 1, - // filter the source to only include the quote field - _source: ['quote'], - body: { - query: { - match_all: {} - } - } - }) - - responseQueue.push(response) - - while (responseQueue.length) { - const { body } = responseQueue.shift() - - // collect the titles from this response - body.hits.hits.forEach(function (hit) { - allQuotes.push(hit._source.quote) - }) - - // check to see if we have collected all of the quotes - if (body.hits.total.value === allQuotes.length) { - console.log('Every quote', allQuotes) - break - } - - // get the next response if there are more quotes to fetch - responseQueue.push( - await client.scroll({ - scrollId: body._scroll_id, - scroll: '30s' - }) - ) - } -} - -run().catch(console.log) ----- - -Another cool usage of the `scroll` API can be done with Node.js ≥ 10, by using -async iteration! - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -// Scroll utility -async function * scrollSearch (params) { - var response = await client.search(params) - - while (true) { - const sourceHits = response.body.hits.hits - - if (sourceHits.length === 0) { - break - } - - for (const hit of sourceHits) { - yield hit - } - - if (!response.body._scroll_id) { - break - } - - response = await client.scroll({ - scrollId: response.body._scroll_id, - scroll: params.scroll - }) - } -} - -async function run () { - await client.bulk({ - refresh: true, - body: [ - { index: { _index: 'game-of-thrones' } }, - { - character: 'Ned Stark', - quote: 'Winter is coming.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - ] - }) - - const params = { - index: 'game-of-thrones', - scroll: '30s', - size: 1, - _source: ['quote'], - body: { - query: { - match_all: {} - } - } - } - - for await (const hit of scrollSearch(params)) { - console.log(hit._source) - } -} - -run().catch(console.log) ----- \ No newline at end of file diff --git a/docs/examples/search.asciidoc b/docs/examples/search.asciidoc deleted file mode 100644 index 006804fe6..000000000 --- a/docs/examples/search.asciidoc +++ /dev/null @@ -1,63 +0,0 @@ -[[search_examples]] -== Search - -The `search` API allows you to execute a search query and get back search hits -that match the query. The query can either be provided using a simple -https://www.elastic.co/guide/en/elasticsearch/reference/6.6/search-uri-request.html[query string as a parameter], -or using a -https://www.elastic.co/guide/en/elasticsearch/reference/6.6/search-request-body.html[request body]. 
- -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - // Let's start by indexing some data - await client.index({ - index: 'game-of-thrones', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.' - } - }) - - await client.index({ - index: 'game-of-thrones', - body: { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - } - }) - - await client.index({ - index: 'game-of-thrones', - // here we are forcing an index refresh, - // otherwise we will not get any result - // in the consequent search - refresh: true, - body: { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - }) - - // Let's search! - const { body } = await client.search({ - index: 'game-of-thrones', - body: { - query: { - match: { - quote: 'winter' - } - } - } - }) - - console.log(body.hits.hits) -} - -run().catch(console.log) ----- \ No newline at end of file diff --git a/docs/examples/sql.query.asciidoc b/docs/examples/sql.query.asciidoc deleted file mode 100644 index bc1dab10e..000000000 --- a/docs/examples/sql.query.asciidoc +++ /dev/null @@ -1,71 +0,0 @@ -[[sql_query_examples]] -== SQL - -{es} SQL is an X-Pack component that allows SQL-like queries to be executed in -real-time against {es}. Whether using the REST interface, command-line or JDBC, -any client can use SQL to search and aggregate data natively inside {es}. One -can think of {es} SQL as a translator, one that understands both SQL and {es} -and makes it easy to read and process data in real-time, at scale by leveraging -{es} capabilities. - -In the following example we will search all the documents that has the field -`house` equals to `stark`, log the result with the tabular view and then -manipulate the result to obtain an object easy to navigate. - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - await client.index({ - index: 'game-of-thrones', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.', - house: 'stark' - } - }) - - await client.index({ - index: 'game-of-thrones', - body: { - character: 'Arya Stark', - quote: 'A girl is Arya Stark of Winterfell. And I\'m going home.', - house: 'stark' - } - }) - - await client.index({ - index: 'game-of-thrones', - refresh: true, - body: { - character: 'Tyrion Lannister', - quote: 'A Lannister always pays his debts.', - house: 'lannister' - } - }) - - const { body } = await client.sql.query({ - body: { - query: "SELECT * FROM \"game-of-thrones\" WHERE house='stark'" - } - }) - - console.log(body) - - const data = body.rows.map(row => { - const obj = {} - for (var i = 0; i < row.length; i++) { - obj[body.columns[i].name] = row[i] - } - return obj - }) - - console.log(data) -} - -run().catch(console.log) ----- diff --git a/docs/examples/suggest.asciidoc b/docs/examples/suggest.asciidoc deleted file mode 100644 index beaccf51f..000000000 --- a/docs/examples/suggest.asciidoc +++ /dev/null @@ -1,66 +0,0 @@ -[[suggest_examples]] -== Suggest - -The suggest feature suggests similar looking terms based on a provided text by -using a suggester. _Parts of the suggest feature are still under development._ - -The suggest request part is defined alongside the query part in a `search` -request. If the query part is left out, only suggestions are returned. 
- -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - const { body: bulkResponse } = await client.bulk({ - refresh: true, - body: [ - { index: { _index: 'game-of-thrones' } }, - { - character: 'Ned Stark', - quote: 'Winter is coming.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - ] - }) - - if (bulkResponse.errors) { - console.log(bulkResponse) - process.exit(1) - } - - const { body } = await client.search({ - index: 'game-of-thrones', - body: { - query: { - match: { quote: 'witner' } - }, - suggest: { - gotsuggest: { - text: 'witner', - term: { field: 'quote' } - } - } - } - }) - - console.log(body) -} - -run().catch(console.log) - ----- \ No newline at end of file diff --git a/docs/examples/transport.request.asciidoc b/docs/examples/transport.request.asciidoc deleted file mode 100644 index 385f61091..000000000 --- a/docs/examples/transport.request.asciidoc +++ /dev/null @@ -1,71 +0,0 @@ -[[transport_request_examples]] -== transport.request - -It can happen that you need to communicate with {es} by using an API that is not -supported by the client, to mitigate this issue you can directly call -`client.transport.request`, which is the internal utility that the client uses -to communicate with {es} when you use an API method. - -NOTE: When using the `transport.request` method you must provide all the -parameters needed to perform an HTTP call, such as `method`, `path`, -`querystring`, and `body`. - - -TIP: If you find yourself use this method too often, take in consideration the -use of `client.extend`, which will make your code look cleaner and easier to -maintain. - -[source,js] ----- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - const { body: bulkResponse } = await client.bulk({ - refresh: true, - body: [ - { index: { _index: 'game-of-thrones' } }, - { - character: 'Ned Stark', - quote: 'Winter is coming.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - }, - - { index: { _index: 'game-of-thrones' } }, - { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - ] - }) - - if (bulkResponse.errors) { - console.log(bulkResponse) - process.exit(1) - } - - const { body } = await client.transport.request({ - method: 'POST', - path: '/game-of-thrones/_search', - body: { - query: { - match: { - quote: 'winter' - } - } - }, - querystring: {} - }) - - console.log(body) -} - -run().catch(console.log) ----- \ No newline at end of file diff --git a/docs/examples/typescript.asciidoc b/docs/examples/typescript.asciidoc deleted file mode 100644 index 4fb97e7f2..000000000 --- a/docs/examples/typescript.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -[[typescript_examples]] -== Typescript - -The client offers a first-class support for TypeScript, since it ships the type -definitions for every exposed API. - -NOTE: If you are using TypeScript you will be required to use _snake_case_ style -to define the API parameters instead of _camelCase_. 
- -[source,ts] ----- -'use strict' - -import { Client, ApiResponse, RequestParams } from '@elastic/elasticsearch' -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run (): void { - // Let's start by indexing some data - const doc1: RequestParams.Index = { - index: 'game-of-thrones', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.' - } - } - await client.index(doc1) - - const doc2: RequestParams.Index = { - index: 'game-of-thrones', - body: { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - } - } - await client.index(doc2) - - const doc3: RequestParams.Index = { - index: 'game-of-thrones', - // here we are forcing an index refresh, - // otherwise we will not get any result - // in the consequent search - refresh: true, - body: { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - } - await client.index(doc3) - - // Let's search! - const params: RequestParams.Search = { - index: 'game-of-thrones', - body: { - query: { - match: { - quote: 'winter' - } - } - } - } - client - .search(params) - .then((result: ApiResponse) => { - console.log(result.body.hits.hits) - }) - .catch((err: Error) => { - console.log(err) - }) -} - -run() ----- \ No newline at end of file diff --git a/docs/examples/update.asciidoc b/docs/examples/update.asciidoc deleted file mode 100644 index 9f5ce102b..000000000 --- a/docs/examples/update.asciidoc +++ /dev/null @@ -1,93 +0,0 @@ -[[update_examples]] -== Update - -The update API allows updates of a specific document using the given script. In -the following example, we will index a document that also tracks how many times -a character has said the given quote, and then we will update the `times` field. - -[source,js] ---------- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - await client.index({ - index: 'game-of-thrones', - id: '1', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.', - times: 0 - } - }) - - await client.update({ - index: 'game-of-thrones', - id: '1', - body: { - script: { - lang: 'painless', - source: 'ctx._source.times++' - // you can also use parameters - // source: 'ctx._source.times += params.count', - // params: { count: 1 } - } - } - }) - - const { body } = await client.get({ - index: 'game-of-thrones', - id: '1' - }) - - console.log(body) -} - -run().catch(console.log) - ---------- - -With the update API, you can also run a partial update of a document. 
- -[source,js] ---------- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - await client.index({ - index: 'game-of-thrones', - id: '1', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.', - isAlive: true - } - }) - - await client.update({ - index: 'game-of-thrones', - id: '1', - body: { - doc: { - isAlive: false - } - } - }) - - const { body } = await client.get({ - index: 'game-of-thrones', - id: '1' - }) - - console.log(body) -} - -run().catch(console.log) - - ---------- \ No newline at end of file diff --git a/docs/examples/update_by_query.asciidoc b/docs/examples/update_by_query.asciidoc deleted file mode 100644 index a9740ddf6..000000000 --- a/docs/examples/update_by_query.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -[[update_by_query_examples]] -== Update By Query - -The simplest usage of _update_by_query just performs an update on every document -in the index without changing the source. This is useful to pick up a new -property or some other online mapping change. - -[source,js] ---------- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - await client.index({ - index: 'game-of-thrones', - body: { - character: 'Ned Stark', - quote: 'Winter is coming.' - } - }) - - await client.index({ - index: 'game-of-thrones', - refresh: true, - body: { - character: 'Arya Stark', - quote: 'A girl is Arya Stark of Winterfell. And I\'m going home.' - } - }) - - await client.updateByQuery({ - index: 'game-of-thrones', - refresh: true, - body: { - script: { - lang: 'painless', - source: 'ctx._source["house"] = "stark"' - }, - query: { - match: { - character: 'stark' - } - } - } - }) - - const { body } = await client.search({ - index: 'game-of-thrones', - body: { - query: { match_all: {} } - } - }) - - console.log(body.hits.hits) -} - -run().catch(console.log) - ---------- diff --git a/docs/extend.asciidoc b/docs/extend.asciidoc deleted file mode 100644 index 0e9d2a532..000000000 --- a/docs/extend.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -[[extend-client]] -== Extend the client - -Sometimes you need to reuse the same logic, or you want to build a custom API to -allow you simplify your code. The easiest way to achieve that is by extending -the client. - -NOTE: If you want to override existing methods, you should specify the -`{ force: true }` option. 
- -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -client.extend('supersearch', ({ makeRequest, ConfigurationError }) => { - return function supersearch (params, options) { - const { - body, - index, - method, - ...querystring - } = params - - // params validation - if (body == null) { - throw new ConfigurationError('Missing required parameter: body') - } - - // build request object - const request = { - method: method || 'POST', - path: `/${encodeURIComponent(index)}/_search_`, - body, - querystring - } - - // build request options object - const requestOptions = { - ignore: options.ignore || null, - requestTimeout: options.requestTimeout || null, - maxRetries: options.maxRetries || null, - asStream: options.asStream || false, - headers: options.headers || null - } - - return makeRequest(request, requestOptions) - } -}) - -client.extend('utility.index', ({ makeRequest }) => { - return function _index (params, options) { - // your code - } -}) - -client.extend('utility.delete', ({ makeRequest }) => { - return function _delete (params, options) { - // your code - } -}) - -client.extend('indices.delete', { force: true }, ({ makeRequest }) => { - return function _delete (params, options) { - // your code - } -}) - -client.supersearch(...) -client.utility.index(...) -client.utility.delete(...) ----- \ No newline at end of file diff --git a/docs/helpers.asciidoc b/docs/helpers.asciidoc deleted file mode 100644 index 37f747f82..000000000 --- a/docs/helpers.asciidoc +++ /dev/null @@ -1,447 +0,0 @@ -[[client-helpers]] -== Client Helpers - -The client comes with an handy collection of helpers to give you a more comfortable experience with some APIs. - -CAUTION: The client helpers are experimental, and the API may change in the next minor releases. -The helpers will not work in any Node.js version lower than 10. - -=== Bulk Helper - -~Added~ ~in~ ~`v7.7.0`~ - -Running Bulk requests can be complex due to the shape of the API, this helper aims to provide a nicer developer experience around the Bulk API. - -==== Usage -[source,js] ----- -const { createReadStream } = require('fs') -const split = require('split2') -const { Client } = require('@elastic/elasticsearch') - -const client = new Client({ node: 'http://localhost:9200/' }) -const result = await client.helpers.bulk({ - datasource: createReadStream('./dataset.ndjson').pipe(split()), - onDocument (doc) { - return { - index: { _index: 'my-index' } - } - } -}) - -console.log(result) -// { -// total: number, -// failed: number, -// retry: number, -// successful: number, -// time: number, -// bytes: number, -// aborted: boolean -// } ----- - -To create a new instance of the Bulk helper, you should access it as shown in the example above, the configuration options are: -[cols=2*] -|=== -|`datasource` -a|An array, async generator or a readable stream with the data you need to index/create/update/delete. -It can be an array of strings or objects, but also a stream of json strings or JavaScript objects. + -If it is a stream, we recommend to use the https://www.npmjs.com/package/split2[`split2`] package, that will split the stream on new lines delimiters. + -This parameter is mandatory. 
-[source,js] ----- -const { createReadStream } = require('fs') -const split = require('split2') -const b = client.helpers.bulk({ - // if you just use split(), the data will be used as array of strings - datasource: createReadStream('./dataset.ndjson').pipe(split()) - // if you need to manipulate the data, you can pass JSON.parse to split - datasource: createReadStream('./dataset.ndjson').pipe(split(JSON.parse)) -}) ----- - -|`onDocument` -a|A function that will be called for each document of the datasource. Inside this function you can manipulate the document and you must return the operation you want to execute with the document. Look at the link:{ref}/docs-bulk.html[Bulk API documentation] to see the supported operations. + -This parameter is mandatory. -[source,js] ----- -const b = client.helpers.bulk({ - onDocument (doc) { - return { - index: { _index: 'my-index' } - } - } -}) ----- - -|`onDrop` -a|A function that will be called for everytime a document can't be indexed and it has reached the maximum amount of retries. -[source,js] ----- -const b = client.helpers.bulk({ - onDrop (doc) { - console.log(doc) - } -}) ----- - -|`flushBytes` -a|The size of the bulk body in bytes to reach before to send it. Default of 5MB. + -_Default:_ `5000000` -[source,js] ----- -const b = client.helpers.bulk({ - flushBytes: 1000000 -}) ----- - -|`flushInterval` -a|How much time (in milliseconds) the helper will wait before flushing the body from the last document read. + -_Default:_ `30000` -[source,js] ----- -const b = client.helpers.bulk({ - flushInterval: 30000 -}) ----- - -|`concurrency` -a|How many request will be executed at the same time. + -_Default:_ `5` -[source,js] ----- -const b = client.helpers.bulk({ - concurrency: 10 -}) ----- - -|`retries` -a|How many times a document will be retried before to call the `onDrop` callback. + -_Default:_ Client max retries. -[source,js] ----- -const b = client.helpers.bulk({ - retries: 3 -}) ----- - -|`wait` -a|How much time to wait before retries in milliseconds. + -_Default:_ 5000. -[source,js] ----- -const b = client.helpers.bulk({ - wait: 3000 -}) ----- - -|`refreshOnCompletion` -a|If `true`, at the end of the bulk operation it will run a refresh on all indices or on the specified indices. + -_Default:_ false. -[source,js] ----- -const b = client.helpers.bulk({ - refreshOnCompletion: true - // or - refreshOnCompletion: 'index-name' -}) ----- - -|=== - -==== Abort a bulk operation -If needed, you can abort a bulk operation at any time. The bulk helper returns a https://promisesaplus.com/[thenable], which has an `abort` method. - -NOTE: The abort method will stop the execution of the bulk operation, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. - -[source,js] ----- -const { createReadStream } = require('fs') -const split = require('split2') -const { Client } = require('@elastic/elasticsearch') - -const client = new Client({ node: 'http://localhost:9200/' }) -const b = client.helpers.bulk({ - datasource: createReadStream('./dataset.ndjson').pipe(split()), - onDocument (doc) { - return { - index: { _index: 'my-index' } - } - }, - onDrop (doc) { - b.abort() - } -}) - -console.log(await b) ----- - -==== Passing custom options to the Bulk API -You can pass any option supported by the link:{ref}/docs-bulk.html#docs-bulk-api-query-params[Bulk API] to the helper, and the helper will use those options in conjuction with the Bulk -API call. 
- -[source,js] ----- -const result = await client.helpers.bulk({ - datasource: [...] - onDocument (doc) { - return { - index: { _index: 'my-index' } - } - }, - pipeline: 'my-pipeline' -}) ----- - -==== Usage with an async generator - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') - -async function * generator () { - const dataset = [ - { user: 'jon', age: 23 }, - { user: 'arya', age: 18 }, - { user: 'tyrion', age: 39 } - ] - for (const doc of dataset) { - yield doc - } -} - -const client = new Client({ node: 'http://localhost:9200/' }) -const result = await client.helpers.bulk({ - datasource: generator(), - onDocument (doc) { - return { - index: { _index: 'my-index' } - } - } -}) - -console.log(result) ----- - -=== Multi Search Helper - -~Added~ ~in~ ~`v7.8.0`~ - -If you are sending search request at a high rate, this helper might be useful for you. -It will use the mutli search API under the hood to batch the requests and improve the overall performances of your application. + -The `result` exposes a `documents` property as well, which allows you to access directly the hits sources. - -==== Usage -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') - -const client = new Client({ node: 'http://localhost:9200/' }) -const s = client.helpers.msearch() - -// promise style API -s.search( - { index: 'stackoverflow' }, - { query: { match: { title: 'javascript' } } } - ) - .then(result => console.log(result.body)) // or result.documents - .catch(err => console.error(err)) - -// callback style API -s.search( - { index: 'stackoverflow' }, - { query: { match: { title: 'ruby' } } }, - (err, result) => { - if (err) console.error(err) - console.log(result.body)) // or result.documents - } -) ----- - -To create a new instance of the Msearch helper, you should access it as shown in the example above, the configuration options are: -[cols=2*] -|=== -|`operations` -a|How many search operations should be sent in a single msearch request. + -_Default:_ `5` -[source,js] ----- -const b = client.helpers.msearch({ - operations: 10 -}) ----- - -|`flushInterval` -a|How much time (in milliseconds) the helper will wait before flushing the operations from the last operation read. + -_Default:_ `500` -[source,js] ----- -const b = client.helpers.msearch({ - flushInterval: 500 -}) ----- - -|`concurrency` -a|How many request will be executed at the same time. + -_Default:_ `5` -[source,js] ----- -const b = client.helpers.msearch({ - concurrency: 10 -}) ----- - -|`retries` -a|How many times an operation will be retried before to resolve the request. An operation will be retried only in case of a 429 error. + -_Default:_ Client max retries. -[source,js] ----- -const b = client.helpers.msearch({ - retries: 3 -}) ----- - -|`wait` -a|How much time to wait before retries in milliseconds. + -_Default:_ 5000. -[source,js] ----- -const b = client.helpers.msearch({ - wait: 3000 -}) ----- - -|=== - -==== Stopping the Msearch Helper -If needed, you can stop a msearch processor at any time. The msearch helper returns a https://promisesaplus.com/[thenable], which has an `stop` method. - -If you are creating multiple msearch helpers instances and using them for a limitied period of time, remember to always use the `stop` method once you have finished using them, otherwise your application will start leaking memory. - -The `stop` method accepts an optional error, that will be dispatched every subsequent search request. 
- -NOTE: The stop method will stop the execution of the msearch processor, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') - -const client = new Client({ node: 'http://localhost:9200/' }) -const s = client.helpers.msearch() - -s.search( - { index: 'stackoverflow' }, - { query: { match: { title: 'javascript' } } } - ) - .then(result => console.log(result.body)) - .catch(err => console.error(err)) - -s.search( - { index: 'stackoverflow' }, - { query: { match: { title: 'ruby' } } } - ) - .then(result => console.log(result.body)) - .catch(err => console.error(err)) - -setImmediate(() => s.stop()) ----- - -=== Search Helper - -~Added~ ~in~ ~`v7.7.0`~ - -A simple wrapper around the search API. Instead of returning the entire `result` object it will return only the search documents source. -For improving the performances, this helper automatically adds `filter_path=hits.hits._source` to the querystring. - -[source,js] ----- -const documents = await client.helpers.search({ - index: 'stackoverflow', - body: { - query: { - match: { - title: 'javascript' - } - } - } -}) - -for (const doc of documents) { - console.log(doc) -} ----- - -=== Scroll Search Helper - -~Added~ ~in~ ~`v7.7.0`~ - -This helpers offers a simple and intuitive way to use the scroll search API. Once called, it returns an https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of[async iterator] which can be used in conjuction with a for-await...of. + -It handles automatically the `429` error and uses the client's `maxRetries` option. - -[source,js] ----- -const scrollSearch = await client.helpers.scrollSearch({ - index: 'stackoverflow', - body: { - query: { - match: { - title: 'javascript' - } - } - } -}) - -for await (const result of scrollSearch) { - console.log(result) -} ----- - -==== Clear a scroll search - -If needed, you can clear a scroll search by calling `result.clear()`: - -[source,js] ----- -for await (const result of scrollSearch) { - if (condition) { - await result.clear() - } -} ----- - -==== Quickly getting the documents - -If you only need the documents from the result of a scroll search, you can access them via `result.documents`: - -[source,js] ----- -for await (const result of scrollSearch) { - console.log(result.documents) -} ----- - -=== Scroll Documents Helper - -~Added~ ~in~ ~`v7.7.0`~ - -It works in the same way as the scroll search helper, but it returns only the documents instead. Note, every loop cycle will return you a single document, and you can't use the `clear` method. -For improving the performances, this helper automatically adds `filter_path=hits.hits._source` to the querystring. - -[source,js] ----- -const scrollSearch = await client.helpers.scrollDocuments({ - index: 'stackoverflow', - body: { - query: { - match: { - title: 'javascript' - } - } - } -}) - -for await (const doc of scrollSearch) { - console.log(doc) -} ----- \ No newline at end of file diff --git a/docs/index-custom-title-page.html b/docs/index-custom-title-page.html new file mode 100644 index 000000000..3efa92c00 --- /dev/null +++ b/docs/index-custom-title-page.html @@ -0,0 +1,185 @@ + + +
+ [The HTML markup of this new title page was lost in extraction; the recoverable text is: "Documentation"; "The official Node.js client provides one-to-one mapping with Elasticsearch REST APIs."; "Get to know the JavaScript client"; "Using the JS client"; "API and developer docs"; "Explore by use case"; "View all Elastic docs"]
\ No newline at end of file diff --git a/docs/index.asciidoc b/docs/index.asciidoc deleted file mode 100644 index 41dc99347..000000000 --- a/docs/index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -= Elasticsearch Node.js client - -:branch: master -include::{asciidoc-dir}/../../shared/attributes.asciidoc[] - -include::introduction.asciidoc[] -include::usage.asciidoc[] -include::configuration.asciidoc[] -include::reference.asciidoc[] -include::breaking-changes.asciidoc[] -include::authentication.asciidoc[] -include::observability.asciidoc[] -include::child.asciidoc[] -include::extend.asciidoc[] -include::helpers.asciidoc[] -include::typescript.asciidoc[] -include::testing.asciidoc[] -include::examples/index.asciidoc[] diff --git a/docs/introduction.asciidoc b/docs/introduction.asciidoc deleted file mode 100644 index 432b8799f..000000000 --- a/docs/introduction.asciidoc +++ /dev/null @@ -1,229 +0,0 @@ -[[introduction]] -== Introduction - -The official Node.js client for {es}. - - -=== Features - -* One-to-one mapping with REST API. -* Generalized, pluggable architecture. -* Configurable, automatic discovery of cluster nodes. -* Persistent, Keep-Alive connections. -* Load balancing across all available nodes. -* Child client support. -* TypeScript support out of the box. - - -=== Install - -[source,sh] ---- -npm install @elastic/elasticsearch ---- - - -=== Compatibility - -The minimum supported version of Node.js is `v8`. - -The library is compatible with all {es} versions since 5.x. We recommend using the same major version of the client as the {es} instance that you are using. - - -[%header,cols=2*] -|=== -|{es} Version -|Client Version - -|`master` -|`master` - -|`7.x` -|`7.x` - -|`6.x` -|`6.x` - -|`5.x` -|`5.x` -|=== - -To install a specific major version of the client, run the following command: - ---- -npm install @elastic/elasticsearch@<major> ---- - - -==== Browser - -WARNING: There is no official support for the browser environment, as it exposes your {es} instance to everyone, which could lead to security issues. We recommend writing a lightweight proxy that uses this client instead. - - -=== Quick start - -First, require and then initialize the client: - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) ---- - - -You can use both the callback API and the promise API; both behave the same way. - -[source,js] ---- -// promise API -const result = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}) - -// callback API -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, result) => { - if (err) console.log(err) -}) ---- - - -The returned value of **every** API call is formed as follows: - -[source,ts] ---- -{ - body: object | boolean - statusCode: number - headers: object - warnings: [string] - meta: object -} ---- - - -Let's see a complete example! - -[source,js] ---- -'use strict' - -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -async function run () { - // Let's start by indexing some data - await client.index({ - index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using {es} ≤ 6 - body: { - character: 'Ned Stark', - quote: 'Winter is coming.'
- } - }) - - await client.index({ - index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using {es} ≤ 6 - body: { - character: 'Daenerys Targaryen', - quote: 'I am the blood of the dragon.' - } - }) - - await client.index({ - index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using {es} ≤ 6 - body: { - character: 'Tyrion Lannister', - quote: 'A mind needs books like a sword needs a whetstone.' - } - }) - - // We need to force an index refresh at this point, otherwise we will not - // get any result in the subsequent search - await client.indices.refresh({ index: 'game-of-thrones' }) - - // Let's search! - const { body } = await client.search({ - index: 'game-of-thrones', - // type: '_doc', // uncomment this line if you are using {es} ≤ 6 - body: { - query: { - match: { quote: 'winter' } - } - } - }) - - console.log(body.hits.hits) -} - -run().catch(console.log) ---- - - -==== Install multiple versions - -If you are using multiple versions of {es}, you need to use multiple versions of the client as well. In the past, installing multiple versions of the same package was not possible, but with `npm v6.9`, you can do it via aliasing. - -To install a different version of the client, run the following command: - -[source,sh] ---- -npm install <alias>@npm:@elastic/elasticsearch@<major> ---- - - -For example, if you need to install `7.x` and `6.x`, run the following commands: - -[source,sh] ---- -npm install es6@npm:@elastic/elasticsearch@6 -npm install es7@npm:@elastic/elasticsearch@7 ---- - - -Your `package.json` will look similar to the following example: - -[source,json] ---- -"dependencies": { - "es6": "npm:@elastic/elasticsearch@^6.7.0", - "es7": "npm:@elastic/elasticsearch@^7.0.0" -} ---- - - -Require the packages from your code by using the alias you have defined. - -[source,js] ---- -const { Client: Client6 } = require('es6') -const { Client: Client7 } = require('es7') - -const client6 = new Client6({ node: 'http://localhost:9200/' }) -const client7 = new Client7({ node: 'http://localhost:9201/' }) - -client6.info(console.log) -client7.info(console.log) ---- - - -Finally, if you want to install the client for the next version of {es} (the one that lives in the {es} master branch), use the following command: - -[source,sh] ---- -npm install esmaster@github:elastic/elasticsearch-js ---- -WARNING: This command installs the master branch of the client, which is not considered stable. diff --git a/docs/observability.asciidoc b/docs/observability.asciidoc deleted file mode 100644 index 0f69ea2b0..000000000 --- a/docs/observability.asciidoc +++ /dev/null @@ -1,339 +0,0 @@ -[[observability]] -== Observability - -The client does not provide a default logger; instead, it offers an event emitter interface to hook into internal events, such as `request` and `response`. - -Correlating those events can be quite hard, especially if your application has a large codebase with many events happening at the same time. - -To help you with this, the client offers you a correlation id system and other features. Let's see them in action.
- -[source,js] ---- -const { events } = require('@elastic/elasticsearch') -console.log(events) ---- - - -The event emitter functionality can be useful if you want to log every request, response and error that happens during the use of the client. - -[source,js] ---- -const logger = require('my-logger')() -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -client.on('response', (err, result) => { - if (err) { - logger.error(err) - } else { - logger.info(result) - } -}) ---- - - -The client emits the following events: -[cols=2*] -|=== -|`request` -a|Emitted before sending the actual request to {es} _(emitted multiple times in case of retries)_. -[source,js] ---- -client.on('request', (err, result) => { - console.log(err, result) -}) ---- - -|`response` -a|Emitted once the {es} response has been received and parsed. -[source,js] ---- -client.on('response', (err, result) => { - console.log(err, result) -}) ---- - -|`sniff` -a|Emitted when the client ends a sniffing request. -[source,js] ---- -client.on('sniff', (err, result) => { - console.log(err, result) -}) ---- - -|`resurrect` -a|Emitted if the client is able to resurrect a dead node. -[source,js] ---- -client.on('resurrect', (err, result) => { - console.log(err, result) -}) ---- - -|=== - -The value of `result` in `request`, `response` and `sniff` will be: - -[source,ts] ---- -body: any; -statusCode: number | null; -headers: anyObject | null; -warnings: string[] | null; -meta: { - context: any; - name: string; - request: { - params: TransportRequestParams; - options: TransportRequestOptions; - id: any; - }; - connection: Connection; - attempts: number; - aborted: boolean; - sniff?: { - hosts: any[]; - reason: string; - }; -}; ---- - - -While the `result` value in `resurrect` will be: - -[source,ts] ---- -strategy: string; -isAlive: boolean; -connection: Connection; -name: string; -request: { - id: any; -}; ---- - - -=== Correlation id - -Correlating events can be quite hard, especially if there are many events at the same time. The client offers you an automatic (and configurable) system to help you handle this problem.
- -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -client.on('request', (err, result) => { - const { id } = result.meta.request - if (err) { - console.log({ error: err, reqId: id }) - } -}) - -client.on('response', (err, result) => { - const { id } = result.meta.request - if (err) { - console.log({ error: err, reqId: id }) - } -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, result) => { - if (err) console.log(err) -}) ---- - - -By default the id is an incremental integer, but you can easily configure it with the `generateRequestId` option: - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'http://localhost:9200/', - // it takes two parameters: the request parameters and options - generateRequestId: function (params, options) { - // your id generation logic - // must be synchronous - return 'id' - } -}) ---- - - -You can also specify a custom id per request: - -[source,js] ---- -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - id: 'custom-id' -}, (err, result) => { - if (err) console.log(err) -}) ---- - - -=== Context object - -Sometimes you might need to make some custom data available in your events; you can do that via the `context` option of a request: - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -client.on('request', (err, result) => { - const { id } = result.meta.request - const { context } = result.meta - if (err) { - console.log({ error: err, reqId: id, context }) - } -}) - -client.on('response', (err, result) => { - const { id } = result.meta.request - const { winter } = result.meta.context - if (err) { - console.log({ error: err, reqId: id, winter }) - } -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - context: { winter: 'is coming' } -}, (err, result) => { - if (err) console.log(err) -}) ---- - - -=== Client name - -If you are using multiple instances of the client, or if you are using multiple child clients _(which is the recommended way to have multiple instances of the client)_, you might need to recognize which client you are using. The `name` option will help you in this regard. - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'http://localhost:9200/', - name: 'parent-client' // defaults to 'elasticsearch-js' -}) - -const child = client.child({ - name: 'child-client' -}) - -console.log(client.name, child.name) - -client.on('request', (err, result) => { - const { id } = result.meta.request - const { name } = result.meta - if (err) { - console.log({ error: err, reqId: id, name }) - } -}) - -client.on('response', (err, result) => { - const { id } = result.meta.request - const { name } = result.meta - if (err) { - console.log({ error: err, reqId: id, name }) - } -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, result) => { - if (err) console.log(err) -}) - -child.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, result) => { - if (err) console.log(err) -}) ---- - - -=== X-Opaque-Id support - -To improve the overall observability, the client offers an easy way to configure the `X-Opaque-Id` header.
If you set `X-Opaque-Id` in a specific request, you will be able to discover this identifier in the https://www.elastic.co/guide/en/elasticsearch/reference/master/logging.html#deprecation-logging[deprecation logs], and it will help you with https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin] as well as https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html#_identifying_running_tasks[identifying running tasks]. - -The `X-Opaque-Id` should be configured in each request; to do that, you can use the `opaqueId` option, as you can see in the following example. The resulting header will be `{ 'X-Opaque-Id': 'my-search' }`. - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'http://localhost:9200/' -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - opaqueId: 'my-search' -}, (err, result) => { - if (err) console.log(err) -}) ---- - -Sometimes it may be useful to prefix all the `X-Opaque-Id` headers with a specific string, in case you need to identify a specific client or server. To do this, the client offers a top-level configuration option: `opaqueIdPrefix`. In the following example, the resulting header will be `{ 'X-Opaque-Id': 'proxy-client::my-search' }`. - -[source,js] ---- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ - node: 'http://localhost:9200/', - opaqueIdPrefix: 'proxy-client::' -}) - -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - opaqueId: 'my-search' -}, (err, result) => { - if (err) console.log(err) -}) ---- - diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc deleted file mode 100644 index 70e731750..000000000 --- a/docs/reference.asciidoc +++ /dev/null @@ -1,10100 +0,0 @@ -[[api-reference]] - -//////// - - - -=========================================================================================================================== -|| || -|| || -|| || -|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || -|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || -|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || -|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || -|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || -|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || -|| || -|| || -|| This file is autogenerated, DO NOT send pull requests that change this file directly. || -|| You should update the script that does the generation, which can be found in '/scripts/utils/generateDocs.js'. || -|| || -|| You can run the script with the following command: || -|| node scripts/generate --branch <branch_name> || -|| or || -|| node scripts/generate --tag <tag_name> || -|| || -|| || -|| || -=========================================================================================================================== - - - -//////// - -== API Reference - -This document contains the entire list of Elasticsearch APIs supported by the client, both OSS and commercial. The client is entirely licensed under Apache 2.0. - -Elasticsearch exposes an HTTP layer to communicate with, and the client is a library that helps you do this. For this reason, you will see HTTP-related parameters, such as `body` or `headers`.
- -Every API can accept two objects: the first contains all the parameters that will be sent to Elasticsearch, while the second includes request-specific parameters, such as timeouts, headers, and so on. -In the first object, every parameter but the body will be sent via the querystring or as a url parameter, depending on the API, and every unrecognized parameter will be sent via the querystring. - -[source,js] ---- -// promise API -const result = await client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } -}, { - ignore: [404], - maxRetries: 3 -}) - -// callback API -client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } -}, { - ignore: [404], - maxRetries: 3 -}, (err, result) => { - if (err) console.log(err) -}) ---- - -In this document, you will find the reference for every parameter accepted via the querystring or the url. If you also need to send a body, you can find the documentation of its format in the reference link provided along with every endpoint. - - - -=== Common parameters -Parameters that are accepted by all API endpoints. - -link:{ref}/common-options.html[Documentation] -[cols=2*] -|=== -|`pretty` -|`boolean` - Pretty format the returned JSON response. - -|`human` -|`boolean` - Return human readable values for statistics. + - _Default:_ `true` - -|`error_trace` or `errorTrace` -|`boolean` - Include the stack trace of returned errors. - -|`source` -|`string` - The URL-encoded request definition. Useful for libraries that do not accept a request body for non-POST requests. - -|`filter_path` or `filterPath` -|`list` - A comma-separated list of filters used to reduce the response. - -|=== -=== bulk - -[source,ts] ---- -client.bulk({ - index: string, - type: string, - wait_for_active_shards: string, - refresh: 'true' | 'false' | 'wait_for', - routing: string, - timeout: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - pipeline: string, - body: object -}) ---- -link:{ref}/docs-bulk.html[Documentation] + -{jsclient}/bulk_examples.html[Code Example] + -[cols=2*] -|=== -|`index` -|`string` - Default index for items which don't provide one - -|`type` -|`string` - Default document type for items which don't provide one - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the bulk operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes.
- -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - Default list of fields to exclude from the returned _source field, can be overridden on each sub-request - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - Default list of fields to extract and return from the _source field, can be overridden on each sub-request - -|`pipeline` -|`string` - The pipeline id to preprocess incoming documents with - -|`body` -|`object` - The operation definition and data (action-data pairs), separated by newlines - -|=== - -=== cat.aliases - -[source,ts] ----- -client.cat.aliases({ - name: string | string[], - format: string, - local: boolean, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/cat-alias.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of alias names to return - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `all` - -|=== - -=== cat.allocation - -[source,ts] ----- -client.cat.allocation({ - node_id: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-allocation.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -=== cat.count - -[source,ts] ----- -client.cat.count({ - index: string | string[], - format: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-count.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.fielddata - -[source,ts] ----- -client.cat.fielddata({ - fields: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-fielddata.html[Documentation] + -[cols=2*] -|=== -|`fields` -|`string \| string[]` - A comma-separated list of fields to return the fielddata size - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.health - -[source,ts] ----- -client.cat.health({ - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - ts: boolean, - v: boolean -}) ----- -link:{ref}/cat-health.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`ts` -|`boolean` - Set to false to disable timestamping + -_Default:_ `true` - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -=== cat.help - -[source,ts] ----- -client.cat.help({ - help: boolean, - s: string | string[] -}) ----- -link:{ref}/cat.html[Documentation] + -[cols=2*] -|=== -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|=== - -=== cat.indices - -[source,ts] ----- -client.cat.indices({ - index: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - local: boolean, - master_timeout: string, - h: string | string[], - health: 'green' | 'yellow' | 'red', - help: boolean, - pri: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean, - include_unloaded_segments: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/cat-indices.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`health` -|`'green' \| 'yellow' \| 'red'` - A health status ("green", "yellow", or "red" to filter only indices matching the specified health status - -|`help` -|`boolean` - Return help information - -|`pri` -|`boolean` - Set to true to return stats only for primary shards - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|`include_unloaded_segments` or `includeUnloadedSegments` -|`boolean` - If set to true segment stats will include stats for segments that are not currently loaded into memory - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `all` - -|=== - -=== cat.master - -[source,ts] ----- -client.cat.master({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-master.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -=== cat.nodeattrs - -[source,ts] ----- -client.cat.nodeattrs({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-nodeattrs.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.nodes - -[source,ts] ----- -client.cat.nodes({ - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - format: string, - full_id: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-nodes.html[Documentation] + -[cols=2*] -|=== -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`full_id` or `fullId` -|`boolean` - Return the full node ID instead of the shortened version (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.pendingTasks - -[source,ts] ----- -client.cat.pendingTasks({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-pending-tasks.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -=== cat.plugins - -[source,ts] ----- -client.cat.plugins({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-plugins.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.recovery - -[source,ts] ----- -client.cat.recovery({ - index: string | string[], - format: string, - active_only: boolean, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - detailed: boolean, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-recovery.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - Comma-separated list or wildcard expression of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`active_only` or `activeOnly` -|`boolean` - If `true`, the response only includes ongoing shard recoveries - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`detailed` -|`boolean` - If `true`, the response includes detailed information about shard recoveries - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.repositories - -[source,ts] ----- -client.cat.repositories({ - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-repositories.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -=== cat.segments - -[source,ts] ----- -client.cat.segments({ - index: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-segments.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.shards - -[source,ts] ----- -client.cat.shards({ - index: string | string[], - format: string, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-shards.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the returned information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.snapshots - -[source,ts] ----- -client.cat.snapshots({ - repository: string | string[], - format: string, - ignore_unavailable: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string \| string[]` - Name of repository from which to fetch the snapshot information - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Set to true to ignore unavailable snapshots - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -=== cat.tasks - -[source,ts] ----- -client.cat.tasks({ - format: string, - node_id: string | string[], - actions: string | string[], - detailed: boolean, - parent_task: number, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/tasks.html[Documentation] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`actions` -|`string \| string[]` - A comma-separated list of actions that should be returned. Leave empty to return all. - -|`detailed` -|`boolean` - Return detailed task information (default: false) - -|`parent_task` or `parentTask` -|`number` - Return tasks with specified parent task id. Set to -1 to return all. - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.templates - -[source,ts] ----- -client.cat.templates({ - name: string, - format: string, - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - A pattern that returned template names must match - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.threadPool - -[source,ts] ----- -client.cat.threadPool({ - thread_pool_patterns: string | string[], - format: string, - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - local: boolean, - master_timeout: string, - h: string | string[], - help: boolean, - s: string | string[], - v: boolean -}) ----- -link:{ref}/cat-thread-pool.html[Documentation] + -[cols=2*] -|=== -|`thread_pool_patterns` or `threadPoolPatterns` -|`string \| string[]` - A comma-separated list of regular-expressions to filter the thread pools in the output - -|`format` -|`string` - a short version of the Accept header, e.g. 
json, yaml - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== clearScroll - -[source,ts] ----- -client.clearScroll({ - scroll_id: string | string[], - body: object -}) ----- -link:{ref}/search-request-body.html#_clear_scroll_api[Documentation] + -[cols=2*] -|=== -|`scroll_id` or `scrollId` -|`string \| string[]` - A comma-separated list of scroll IDs to clear + - -WARNING: This parameter has been deprecated. - -|`body` -|`object` - A comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter - -|=== - -=== cluster.allocationExplain - -[source,ts] ----- -client.cluster.allocationExplain({ - include_yes_decisions: boolean, - include_disk_info: boolean, - body: object -}) ----- -link:{ref}/cluster-allocation-explain.html[Documentation] + -[cols=2*] -|=== -|`include_yes_decisions` or `includeYesDecisions` -|`boolean` - Return 'YES' decisions in explanation (default: false) - -|`include_disk_info` or `includeDiskInfo` -|`boolean` - Return information about disk usage and shard sizes (default: false) - -|`body` -|`object` - The index, shard, and primary flag to explain. Empty means 'explain the first unassigned shard' - -|=== - -=== cluster.deleteComponentTemplate - -[source,ts] ----- -client.cluster.deleteComponentTemplate({ - name: string, - timeout: string, - master_timeout: string -}) ----- -link:{ref}/indices-component-template.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -=== cluster.deleteVotingConfigExclusions - -[source,ts] ----- -client.cluster.deleteVotingConfigExclusions({ - wait_for_removal: boolean -}) ----- -link:{ref}/voting-config-exclusions.html[Documentation] + -[cols=2*] -|=== -|`wait_for_removal` or `waitForRemoval` -|`boolean` - Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. 
+ -_Default:_ `true` - -|=== - -=== cluster.existsComponentTemplate - -[source,ts] ----- -client.cluster.existsComponentTemplate({ - name: string, - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-component-template.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== cluster.getComponentTemplate - -[source,ts] ----- -client.cluster.getComponentTemplate({ - name: string | string[], - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-component-template.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - The comma separated names of the component templates - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== cluster.getSettings - -[source,ts] ----- -client.cluster.getSettings({ - flat_settings: boolean, - master_timeout: string, - timeout: string, - include_defaults: boolean -}) ----- -link:{ref}/cluster-update-settings.html[Documentation] + -[cols=2*] -|=== -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether to return all default clusters setting. - -|=== - -=== cluster.health - -[source,ts] ----- -client.cluster.health({ - index: string | string[], - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - level: 'cluster' | 'indices' | 'shards', - local: boolean, - master_timeout: string, - timeout: string, - wait_for_active_shards: string, - wait_for_nodes: string, - wait_for_events: 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid', - wait_for_no_relocating_shards: boolean, - wait_for_no_initializing_shards: boolean, - wait_for_status: 'green' | 'yellow' | 'red' -}) ----- -link:{ref}/cluster-health.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - Limit the information returned to a specific index - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `all` - -|`level` -|`'cluster' \| 'indices' \| 'shards'` - Specify the level of detail for returned information + -_Default:_ `cluster` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Wait until the specified number of shards is active - -|`wait_for_nodes` or `waitForNodes` -|`string` - Wait until the specified number of nodes is available - -|`wait_for_events` or `waitForEvents` -|`'immediate' \| 'urgent' \| 'high' \| 'normal' \| 'low' \| 'languid'` - Wait until all currently queued events with the given priority are processed - -|`wait_for_no_relocating_shards` or `waitForNoRelocatingShards` -|`boolean` - Whether to wait until there are no relocating shards in the cluster - -|`wait_for_no_initializing_shards` or `waitForNoInitializingShards` -|`boolean` - Whether to wait until there are no initializing shards in the cluster - -|`wait_for_status` or `waitForStatus` -|`'green' \| 'yellow' \| 'red'` - Wait until cluster is in a specific state - -|=== - -=== cluster.pendingTasks - -[source,ts] ----- -client.cluster.pendingTasks({ - local: boolean, - master_timeout: string -}) ----- -link:{ref}/cluster-pending.html[Documentation] + -[cols=2*] -|=== -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -=== cluster.postVotingConfigExclusions - -[source,ts] ----- -client.cluster.postVotingConfigExclusions({ - node_ids: string, - node_names: string, - timeout: string -}) ----- -link:{ref}/voting-config-exclusions.html[Documentation] + -[cols=2*] -|=== -|`node_ids` or `nodeIds` -|`string` - A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify ?node_names. - -|`node_names` or `nodeNames` -|`string` - A comma-separated list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify ?node_ids. 
- -|`timeout` -|`string` - Explicit operation timeout + -_Default:_ `30s` - -|=== - -=== cluster.putComponentTemplate - -[source,ts] ----- -client.cluster.putComponentTemplate({ - name: string, - create: boolean, - timeout: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-component-template.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`create` -|`boolean` - Whether the index template should only be added if new or can also replace an existing one - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The template definition - -|=== - -=== cluster.putSettings - -[source,ts] ----- -client.cluster.putSettings({ - flat_settings: boolean, - master_timeout: string, - timeout: string, - body: object -}) ----- -link:{ref}/cluster-update-settings.html[Documentation] + -[cols=2*] -|=== -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`body` -|`object` - The settings to be updated. Can be either `transient` or `persistent` (survives cluster restart). - -|=== - -=== cluster.remoteInfo - -[source,ts] ----- -client.cluster.remoteInfo() ----- -link:{ref}/cluster-remote-info.html[Documentation] + - - -=== cluster.reroute - -[source,ts] ----- -client.cluster.reroute({ - dry_run: boolean, - explain: boolean, - retry_failed: boolean, - metric: string | string[], - master_timeout: string, - timeout: string, - body: object -}) ----- -link:{ref}/cluster-reroute.html[Documentation] + -[cols=2*] -|=== -|`dry_run` or `dryRun` -|`boolean` - Simulate the operation only and return the resulting state - -|`explain` -|`boolean` - Return an explanation of why the commands can or cannot be executed - -|`retry_failed` or `retryFailed` -|`boolean` - Retries allocation of shards that are blocked due to too many subsequent allocation failures - -|`metric` -|`string \| string[]` - Limit the information returned to the specified metrics. 
Defaults to all but metadata - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`body` -|`object` - The definition of `commands` to perform (`move`, `cancel`, `allocate`) - -|=== - -=== cluster.state - -[source,ts] ----- -client.cluster.state({ - index: string | string[], - metric: string | string[], - local: boolean, - master_timeout: string, - flat_settings: boolean, - wait_for_metadata_version: number, - wait_for_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/cluster-state.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`metric` -|`string \| string[]` - Limit the information returned to the specified metrics - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`wait_for_metadata_version` or `waitForMetadataVersion` -|`number` - Wait for the metadata version to be equal or greater than the specified metadata version - -|`wait_for_timeout` or `waitForTimeout` -|`string` - The maximum time to wait for wait_for_metadata_version before timing out - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `open` - -|=== - -=== cluster.stats - -[source,ts] ----- -client.cluster.stats({ - node_id: string | string[], - flat_settings: boolean, - timeout: string -}) ----- -link:{ref}/cluster-stats.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== count - -[source,ts] ----- -client.count({ - index: string | string[], - ignore_unavailable: boolean, - ignore_throttled: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - min_score: number, - preference: string, - routing: string | string[], - q: string, - analyzer: string, - analyze_wildcard: boolean, - default_operator: 'AND' | 'OR', - df: string, - lenient: boolean, - terminate_after: number, - body: object -}) ----- -link:{ref}/search-count.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of indices to restrict the results - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Whether specified concrete, expanded or aliased indices should be ignored when throttled - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `open` - -|`min_score` or `minScore` -|`number` - Include only documents with a specific `_score` value in the result - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`q` -|`string` - Query in the Lucene query string syntax - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`terminate_after` or `terminateAfter` -|`number` - The maximum count for each shard, upon reaching which the query execution will terminate early - -|`body` -|`object` - A query to restrict the results specified with the Query DSL (optional) - -|=== - -=== create - -[source,ts] ----- -client.create({ - id: string, - index: string, - type: string, - wait_for_active_shards: string, - refresh: 'true' | 'false' | 'wait_for', - routing: string, - timeout: string, - version: number, - version_type: 'internal' | 'external' | 'external_gte', - pipeline: string, - body: object -}) ----- -link:{ref}/docs-index_.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Document ID - -|`index` -|`string` - The name of the index - -|`type` -|`string` - The type of the document + - -WARNING: This parameter has been deprecated. - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. - -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|`pipeline` -|`string` - The pipeline id to preprocess incoming documents with - -|`body` -|`object` - The document - -|=== - -=== delete - -[source,ts] ----- -client.delete({ - id: string, - index: string, - type: string, - wait_for_active_shards: string, - refresh: 'true' | 'false' | 'wait_for', - routing: string, - timeout: string, - if_seq_no: number, - if_primary_term: number, - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-delete.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`type` -|`string` - The type of the document + - -WARNING: This parameter has been deprecated. 
- -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the delete operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes. - -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`if_seq_no` or `ifSeqNo` -|`number` - only perform the delete operation if the last operation that has changed the document has the specified sequence number - -|`if_primary_term` or `ifPrimaryTerm` -|`number` - only perform the delete operation if the last operation that has changed the document has the specified primary term - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -=== deleteByQuery - -[source,ts] ----- -client.deleteByQuery({ - index: string | string[], - analyzer: string, - analyze_wildcard: boolean, - default_operator: 'AND' | 'OR', - df: string, - from: number, - ignore_unavailable: boolean, - allow_no_indices: boolean, - conflicts: 'abort' | 'proceed', - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - lenient: boolean, - preference: string, - q: string, - routing: string | string[], - scroll: string, - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - search_timeout: string, - max_docs: number, - sort: string | string[], - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - terminate_after: number, - stats: string | string[], - version: boolean, - request_cache: boolean, - refresh: boolean, - timeout: string, - wait_for_active_shards: string, - scroll_size: number, - wait_for_completion: boolean, - requests_per_second: number, - slices: number|string, - body: object -}) ----- -link:{ref}/docs-delete-by-query.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`from` -|`number` - Starting offset (default: 0) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`conflicts` -|`'abort' \| 'proceed'` - What to do when the delete by query hits version conflicts? 
+ -_Default:_ `abort` - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type - -|`search_timeout` or `searchTimeout` -|`string` - Explicit timeout for each search request. Defaults to no timeout. - -|`max_docs` or `maxDocs` -|`number` - Maximum number of documents to process (default: all documents) - -|`sort` -|`string \| string[]` - A comma-separated list of `field:direction` pairs - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`terminate_after` or `terminateAfter` -|`number` - The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. - -|`stats` -|`string \| string[]` - Specific 'tag' of the request for logging and statistical purposes - -|`version` -|`boolean` - Specify whether to return document version as part of a hit - -|`request_cache` or `requestCache` -|`boolean` - Specify if request cache should be used for this request or not, defaults to index level setting - -|`refresh` -|`boolean` - Should the affected indexes be refreshed? - -|`timeout` -|`string` - Time each individual bulk request should wait for shards that are unavailable. + -_Default:_ `1m` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the delete by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`scroll_size` or `scrollSize` -|`number` - Size of the scroll request powering the delete by query + -_Default:_ `100` - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Whether the request should block until the delete by query is complete. + -_Default:_ `true` - -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle for this request in sub-requests per second. -1 means no throttle. - -|`slices` -|`number\|string` - The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`.
+ -_Default:_ `1` - -|`body` -|`object` - The search definition using the Query DSL - -|=== - -=== deleteByQueryRethrottle - -[source,ts] ----- -client.deleteByQueryRethrottle({ - task_id: string, - requests_per_second: number -}) ----- -link:{ref}/docs-delete-by-query.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - The task id to rethrottle - -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in floating sub-requests per second. -1 means set no throttle. - -|=== - -=== deleteScript - -[source,ts] ----- -client.deleteScript({ - id: string, - timeout: string, - master_timeout: string -}) ----- -link:{ref}/modules-scripting.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Script ID - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -=== exists - -[source,ts] ----- -client.exists({ - id: string, - index: string, - stored_fields: string | string[], - preference: string, - realtime: boolean, - refresh: boolean, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-get.html[Documentation] + -{jsclient}/exists_examples.html[Code Example] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return in the response - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`realtime` -|`boolean` - Specify whether to perform the operation in realtime or search mode - -|`refresh` -|`boolean` - Refresh the shard containing the document before performing the operation - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -=== existsSource - -[source,ts] ----- -client.existsSource({ - id: string, - index: string, - type: string, - preference: string, - realtime: boolean, - refresh: boolean, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-get.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`type` -|`string` - The type of the document; deprecated and optional starting with 7.0 + - -WARNING: This parameter has been deprecated. 
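A minimal sketch of `deleteByQuery` under the same assumptions as the earlier sketch (instantiated `client`, illustrative index name and query); `conflicts: 'proceed'` keeps the operation going past version conflicts instead of aborting:

[source,ts]
----
// inside an async function; `client` as in the earlier sketch
const { body } = await client.deleteByQuery({
  index: 'my-index',
  conflicts: 'proceed',   // don't abort on version conflicts
  slices: 'auto',         // let Elasticsearch pick the slice count
  body: { query: { match: { status: 'stale' } } }
})
console.log(body.deleted) // number of documents removed
----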
- -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`realtime` -|`boolean` - Specify whether to perform the operation in realtime or search mode - -|`refresh` -|`boolean` - Refresh the shard containing the document before performing the operation - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -=== explain - -[source,ts] ----- -client.explain({ - id: string, - index: string, - analyze_wildcard: boolean, - analyzer: string, - default_operator: 'AND' | 'OR', - df: string, - stored_fields: string | string[], - lenient: boolean, - preference: string, - q: string, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - body: object -}) ----- -link:{ref}/search-explain.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcards and prefix queries in the query string query should be analyzed (default: false) - -|`analyzer` -|`string` - The analyzer for the query string query - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The default field for query string query (default: _all) - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return in the response - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`body` -|`object` - The query definition using the Query DSL - -|=== - -=== fieldCaps - -[source,ts] ----- -client.fieldCaps({ - index: string | string[], - fields: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - include_unmapped: boolean -}) ----- -link:{ref}/search-field-caps.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`fields` -|`string \| string[]` - A comma-separated list of field names - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices 
should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`include_unmapped` or `includeUnmapped` -|`boolean` - Indicates whether unmapped fields should be included in the response. - -|=== - -=== get - -[source,ts] ----- -client.get({ - id: string, - index: string, - stored_fields: string | string[], - preference: string, - realtime: boolean, - refresh: boolean, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-get.html[Documentation] + -{jsclient}/get_examples.html[Code Example] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return in the response - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`realtime` -|`boolean` - Specify whether to perform the operation in realtime or search mode - -|`refresh` -|`boolean` - Refresh the shard containing the document before performing the operation - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -=== getScript - -[source,ts] ----- -client.getScript({ - id: string, - master_timeout: string -}) ----- -link:{ref}/modules-scripting.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Script ID - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -=== getScriptContext -*Stability:* experimental -[source,ts] ----- -client.getScriptContext() ----- -link:https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html[Documentation] + - - -=== getScriptLanguages -*Stability:* experimental -[source,ts] ----- -client.getScriptLanguages() ----- -link:{ref}/modules-scripting.html[Documentation] + - - -=== getSource - -[source,ts] ----- -client.getSource({ - id: string, - index: string, - preference: string, - realtime: boolean, - refresh: boolean, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - version: number, - version_type: 'internal' | 'external' | 'external_gte' -}) ----- -link:{ref}/docs-get.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The document ID - -|`index` -|`string` - The name of the index - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - 
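For illustration, a minimal `get` sketch under the same assumptions (7.x client, hypothetical index and ID):

[source,ts]
----
// inside an async function; `client` as in the earlier sketch
const { body } = await client.get({ index: 'my-index', id: '1' })
console.log(body._source)                      // the stored document
console.log(body._seq_no, body._primary_term)  // useful for optimistic concurrency control
----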
-|`realtime` -|`boolean` - Specify whether to perform the operation in realtime or search mode - -|`refresh` -|`boolean` - Refresh the shard containing the document before performing the operation - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|=== - -=== index - -[source,ts] ---- -client.index({ - id: string, - index: string, - wait_for_active_shards: string, - op_type: 'index' | 'create', - refresh: 'true' | 'false' | 'wait_for', - routing: string, - timeout: string, - version: number, - version_type: 'internal' | 'external' | 'external_gte', - if_seq_no: number, - if_primary_term: number, - pipeline: string, - body: object -}) ---- -link:{ref}/docs-index_.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Document ID - -|`index` -|`string` - The name of the index - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`op_type` or `opType` -|`'index' \| 'create'` - Explicit operation type. Defaults to `index` for requests with an explicit document ID, and to `create` for requests without an explicit document ID - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes.
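`getSource` (documented above) returns the document body alone; a sketch under the same assumptions, noting that in the 7.x client the response `body` is the raw `_source` object without the hit metadata that `get` adds:

[source,ts]
----
// inside an async function; `client` as in the earlier sketch
const { body: source } = await client.getSource({ index: 'my-index', id: '1' })
console.log(source.title) // fields of the document itself, no _id/_version wrapper
----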
- -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|`if_seq_no` or `ifSeqNo` -|`number` - only perform the index operation if the last operation that has changed the document has the specified sequence number - -|`if_primary_term` or `ifPrimaryTerm` -|`number` - only perform the index operation if the last operation that has changed the document has the specified primary term - -|`pipeline` -|`string` - The pipeline id to preprocess incoming documents with - -|`body` -|`object` - The document - -|=== - -=== indices.analyze - -[source,ts] ---- -client.indices.analyze({ - index: string, - body: object -}) ---- -link:{ref}/indices-analyze.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to scope the operation - -|`body` -|`object` - Define analyzer/tokenizer parameters and the text on which the analysis should be performed - -|=== - -=== indices.clearCache - -[source,ts] ---- -client.indices.clearCache({ - index: string | string[], - fielddata: boolean, - fields: string | string[], - query: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - request: boolean -}) ---- -link:{ref}/indices-clearcache.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to limit the operation - -|`fielddata` -|`boolean` - Clear field data - -|`fields` -|`string \| string[]` - A comma-separated list of fields to clear when using the `fielddata` parameter (default: all) - -|`query` -|`boolean` - Clear query caches - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`request` -|`boolean` - Clear request cache - -|=== - -=== indices.clone - -[source,ts] ---- -client.indices.clone({ - index: string, - target: string, - timeout: string, - master_timeout: string, - wait_for_active_shards: string, - body: object -}) ---- -link:{ref}/indices-clone-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the source index to clone - -|`target` -|`string` - The name of the target index to clone into - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Set the number of active shards to wait for on the cloned index before the operation returns.
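A minimal `indices.analyze` sketch under the same assumptions (the analyzer and text are illustrative), useful for checking how an analyzer tokenizes input:

[source,ts]
----
// inside an async function; `client` as in the earlier sketch
const { body } = await client.indices.analyze({
  body: { analyzer: 'standard', text: 'Quick Brown Foxes!' }
})
console.log(body.tokens.map(t => t.token)) // e.g. [ 'quick', 'brown', 'foxes' ]
----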
- -|`body` -|`object` - The configuration for the target index (`settings` and `aliases`) - -|=== - -=== indices.close - -[source,ts] ----- -client.indices.close({ - index: string | string[], - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - wait_for_active_shards: string -}) ----- -link:{ref}/indices-open-close.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma separated list of indices to close - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of active shards to wait for before the operation returns. - -|=== - -=== indices.create - -[source,ts] ----- -client.indices.create({ - index: string, - wait_for_active_shards: string, - timeout: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-create-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Set the number of active shards to wait for before the operation returns. 
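A hedged sketch of `indices.clone` under the same assumptions; cloning requires the source index to be made read-only first, so the `index.blocks.write` setting is applied before the clone (names are illustrative):

[source,ts]
----
// inside an async function; `client` as in the earlier sketch
await client.indices.putSettings({
  index: 'my-index',
  body: { 'index.blocks.write': true } // clone requires a read-only source index
})
await client.indices.clone({ index: 'my-index', target: 'my-index-copy' })
----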
- -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The configuration for the index (`settings` and `mappings`) - -|=== - -=== indices.createDataStream -*Stability:* experimental -[source,ts] ----- -client.indices.createDataStream({ - name: string, - body: object -}) ----- -link:{ref}/data-streams.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the data stream - -|`body` -|`object` - The data stream definition - -|=== - -=== indices.delete - -[source,ts] ----- -client.indices.delete({ - index: string | string[], - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/indices-delete-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of indices to delete; use `_all` or `*` string to delete all indices - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if a wildcard expression resolves to no concrete indices (default: false) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether wildcard expressions should get expanded to open or closed indices (default: open) + -_Default:_ `open` - -|=== - -=== indices.deleteAlias - -[source,ts] ----- -client.indices.deleteAlias({ - index: string | string[], - name: string | string[], - timeout: string, - master_timeout: string -}) ----- -link:{ref}/indices-aliases.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names (supports wildcards); use `_all` for all indices - -|`name` -|`string \| string[]` - A comma-separated list of aliases to delete (supports wildcards); use `_all` to delete all aliases for the specified indices. 
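A minimal `indices.create` sketch under the same assumptions (settings and mappings are illustrative):

[source,ts]
----
// inside an async function; `client` as in the earlier sketch
await client.indices.create({
  index: 'my-index',
  body: {
    settings: { number_of_shards: 1, number_of_replicas: 1 },
    mappings: {
      properties: {
        title: { type: 'text' },
        published: { type: 'boolean' }
      }
    }
  }
})
----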
- -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -=== indices.deleteDataStream -*Stability:* experimental -[source,ts] ---- -client.indices.deleteDataStream({ - name: string -}) ---- -link:{ref}/data-streams.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the data stream - -|=== - -=== indices.deleteIndexTemplate - -[source,ts] ---- -client.indices.deleteIndexTemplate({ - name: string, - timeout: string, - master_timeout: string -}) ---- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -=== indices.deleteTemplate - -[source,ts] ---- -client.indices.deleteTemplate({ - name: string, - timeout: string, - master_timeout: string -}) ---- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -=== indices.exists - -[source,ts] ---- -client.indices.exists({ - index: string | string[], - local: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - flat_settings: boolean, - include_defaults: boolean -}) ---- -link:{ref}/indices-exists.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if a wildcard expression resolves to no concrete indices (default: false) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether wildcard expressions should get expanded to open or closed indices (default: open) + -_Default:_ `open` - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether to return all default settings for each of the indices. - -|=== - -=== indices.existsAlias - -[source,ts] ---- -client.indices.existsAlias({ - name: string | string[], - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - local: boolean -}) ---- -link:{ref}/indices-aliases.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of alias names to return - -|`index` -|`string \| string[]` - A comma-separated list of index names to filter aliases - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `all` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== indices.existsIndexTemplate - -[source,ts] ---- -client.indices.existsIndexTemplate({ - name: string, - flat_settings: boolean, - master_timeout: string, - local: boolean -}) ---- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== indices.existsTemplate - -[source,ts] ---- -client.indices.existsTemplate({ - name: string | string[], - flat_settings: boolean, - master_timeout: string, - local: boolean -}) ---- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - The comma separated names of the index templates - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== indices.existsType - -[source,ts] ---- -client.indices.existsType({ - index: string | string[], - type: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - local: boolean -}) ---- -link:{ref}/indices-types-exists.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` to check the types across all indices - -|`type` -|`string \| string[]` - A comma-separated list of document types to check - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== indices.flush - -[source,ts] ---- -client.indices.flush({ - index: string | string[], - force: boolean, - wait_if_ongoing: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ---- -link:{ref}/indices-flush.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string for all indices - -|`force` -|`boolean` - Whether a flush should be forced even if it is not necessarily needed, i.e.
if no changes will be committed to the index. This is useful if transaction log IDs should be incremented even if no uncommitted changes are present. (This setting can be considered as internal) - -|`wait_if_ongoing` or `waitIfOngoing` -|`boolean` - If set to true the flush operation will block until the flush can be executed if another flush operation is already executing. The default is true. If set to false the flush will be skipped if another flush operation is already running. - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|=== - -=== indices.forcemerge - -[source,ts] ---- -client.indices.forcemerge({ - index: string | string[], - flush: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - max_num_segments: number, - only_expunge_deletes: boolean -}) ---- -link:{ref}/indices-forcemerge.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`flush` -|`boolean` - Specify whether the index should be flushed after performing the operation (default: true) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both.
+ -_Default:_ `open` - -|`max_num_segments` or `maxNumSegments` -|`number` - The number of segments the index should be merged into (default: dynamic) - -|`only_expunge_deletes` or `onlyExpungeDeletes` -|`boolean` - Specify whether the operation should only expunge deleted documents - -|=== - -=== indices.get - -[source,ts] ---- -client.indices.get({ - index: string | string[], - local: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - flat_settings: boolean, - include_defaults: boolean, - master_timeout: string -}) ---- -link:{ref}/indices-get-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if a wildcard expression resolves to no concrete indices (default: false) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether wildcard expressions should get expanded to open or closed indices (default: open) + -_Default:_ `open` - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether to return all default settings for each of the indices. - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|=== - -=== indices.getAlias - -[source,ts] ---- -client.indices.getAlias({ - name: string | string[], - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - local: boolean -}) ---- -link:{ref}/indices-aliases.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of alias names to return - -|`index` -|`string \| string[]` - A comma-separated list of index names to filter aliases - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both.
+ -_Default:_ `all` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== indices.getDataStream -*Stability:* experimental -[source,ts] ----- -client.indices.getDataStream({ - name: string -}) ----- -link:{ref}/data-streams.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name or wildcard expression of the requested data streams - -|=== - -=== indices.getFieldMapping - -[source,ts] ----- -client.indices.getFieldMapping({ - fields: string | string[], - index: string | string[], - include_defaults: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - local: boolean -}) ----- -link:{ref}/indices-get-field-mapping.html[Documentation] + -[cols=2*] -|=== -|`fields` -|`string \| string[]` - A comma-separated list of fields - -|`index` -|`string \| string[]` - A comma-separated list of index names - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether the default mapping values should be returned as well - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== indices.getIndexTemplate - -[source,ts] ----- -client.indices.getIndexTemplate({ - name: string | string[], - flat_settings: boolean, - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - The comma separated names of the index templates - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== indices.getMapping - -[source,ts] ----- -client.indices.getMapping({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - master_timeout: string, - local: boolean -}) ----- -link:{ref}/indices-get-mapping.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+ -_Default:_ `open` - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) + - -WARNING: This parameter has been deprecated. - -|=== - -=== indices.getSettings - -[source,ts] ---- -client.indices.getSettings({ - index: string | string[], - name: string | string[], - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - flat_settings: boolean, - local: boolean, - include_defaults: boolean -}) ---- -link:{ref}/indices-get-settings.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`name` -|`string \| string[]` - The name of the settings that should be included - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `all` - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`include_defaults` or `includeDefaults` -|`boolean` - Whether to return all default settings for each of the indices. - -|=== - -=== indices.getTemplate - -[source,ts] ---- -client.indices.getTemplate({ - name: string | string[], - flat_settings: boolean, - master_timeout: string, - local: boolean -}) ---- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - The comma separated names of the index templates - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== indices.getUpgrade - -[source,ts] ---- -client.indices.getUpgrade({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ---- -link:{ref}/indices-upgrade.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|=== - -=== indices.open - -[source,ts] ----- -client.indices.open({ - index: string | string[], - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - wait_for_active_shards: string -}) ----- -link:{ref}/indices-open-close.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma separated list of indices to open - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `closed` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of active shards to wait for before the operation returns. - -|=== - -=== indices.putAlias - -[source,ts] ----- -client.indices.putAlias({ - index: string | string[], - name: string, - timeout: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-aliases.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names the alias should point to (supports wildcards); use `_all` to perform the operation on all indices. 
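`close` and `open` (documented above) are commonly paired, for example around changes that require a closed index; a sketch under the same assumptions:

[source,ts]
----
// inside an async function; `client` as in the earlier sketch
await client.indices.close({ index: 'my-index' })
// ... apply changes that require a closed index ...
await client.indices.open({ index: 'my-index', wait_for_active_shards: '1' })
----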
- -|`name` -|`string` - The name of the alias to be created or updated - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The settings for the alias, such as `routing` or `filter` - -|=== - -=== indices.putIndexTemplate - -[source,ts] ---- -client.indices.putIndexTemplate({ - name: string, - create: boolean, - cause: string, - master_timeout: string, - body: object -}) ---- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`create` -|`boolean` - Whether the index template should only be added if new or can also replace an existing one - -|`cause` -|`string` - User defined reason for creating/updating the index template - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The template definition - -|=== - -=== indices.putMapping - -[source,ts] ---- -client.indices.putMapping({ - index: string | string[], - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - body: object -}) ---- -link:{ref}/indices-put-mapping.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`body` -|`object` - The mapping definition - -|=== - -=== indices.putSettings - -[source,ts] ---- -client.indices.putSettings({ - index: string | string[], - master_timeout: string, - timeout: string, - preserve_existing: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - flat_settings: boolean, - body: object -}) ---- -link:{ref}/indices-update-settings.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`timeout` -|`string` - Explicit operation timeout - -|`preserve_existing` or `preserveExisting` -|`boolean` - Whether to update existing settings. If set to `true`, existing settings on an index remain unchanged; the default is `false` - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`body` -|`object` - The index settings to be updated - -|=== - -=== indices.putTemplate - -[source,ts] ----- -client.indices.putTemplate({ - name: string, - order: number, - create: boolean, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the template - -|`order` -|`number` - The order for this template when merging multiple matching ones (higher numbers are merged later, overriding the lower numbers) - -|`create` -|`boolean` - Whether the index template should only be added if new or can also replace an existing one - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The template definition - -|=== - -=== indices.recovery - -[source,ts] ----- -client.indices.recovery({ - index: string | string[], - detailed: boolean, - active_only: boolean -}) ----- -link:{ref}/indices-recovery.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`detailed` -|`boolean` - Whether to display detailed information about shard recovery - -|`active_only` or `activeOnly` -|`boolean` - Display only those recoveries that are currently on-going - -|=== - -=== indices.refresh - -[source,ts] ----- -client.indices.refresh({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/indices-refresh.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|=== - -=== indices.rollover - -[source,ts] ----- -client.indices.rollover({ - alias: string, - new_index: string, - timeout: string, - dry_run: boolean, - master_timeout: string, - wait_for_active_shards: string, - body: object -}) ----- -link:{ref}/indices-rollover-index.html[Documentation] + -[cols=2*] -|=== -|`alias` -|`string` - The name of the alias to rollover - -|`new_index` or `newIndex` -|`string` - The name of the rollover index - -|`timeout` -|`string` - Explicit operation timeout - -|`dry_run` or `dryRun` -|`boolean` - If set to true the rollover action will only be validated but not actually performed even if a condition matches. 
The default is false. - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Set the number of active shards to wait for on the newly created rollover index before the operation returns. - -|`body` -|`object` - The conditions that need to be met for executing rollover - -|=== - -=== indices.segments - -[source,ts] ---- -client.indices.segments({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - verbose: boolean -}) ---- -link:{ref}/indices-segments.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`verbose` -|`boolean` - Includes detailed memory usage by Lucene. - -|=== - -=== indices.shardStores - -[source,ts] ---- -client.indices.shardStores({ - index: string | string[], - status: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ---- -link:{ref}/indices-shards-stores.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`status` -|`string \| string[]` - A comma-separated list of statuses used to filter on shards to get store information for - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|=== - -=== indices.shrink - -[source,ts] ---- -client.indices.shrink({ - index: string, - target: string, - timeout: string, - master_timeout: string, - wait_for_active_shards: string, - body: object -}) ---- -link:{ref}/indices-shrink-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the source index to shrink - -|`target` -|`string` - The name of the target index to shrink into - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Set the number of active shards to wait for on the shrunken index before the operation returns.
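A hedged `indices.rollover` sketch (the alias name and conditions are illustrative); the alias is assumed to point at a single write index:

[source,ts]
----
// inside an async function; `client` as in the earlier sketch
const { body } = await client.indices.rollover({
  alias: 'logs-write', // illustrative alias pointing at the current write index
  body: { conditions: { max_age: '7d', max_docs: 1000000 } }
})
console.log(body.rolled_over, body.new_index)
----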
- -|`body` -|`object` - The configuration for the target index (`settings` and `aliases`) - -|=== - -=== indices.simulateIndexTemplate - -[source,ts] ----- -client.indices.simulateIndexTemplate({ - name: string, - create: boolean, - cause: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the index (it must be a concrete index name) - -|`create` -|`boolean` - Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one - -|`cause` -|`string` - User defined reason for dry-run creating the new template for simulation purposes - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - New index template definition, which will be included in the simulation, as if it already exists in the system - -|=== - -=== indices.simulateTemplate - -[source,ts] ----- -client.indices.simulateTemplate({ - name: string, - create: boolean, - cause: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-templates.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the index template - -|`create` -|`boolean` - Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one - -|`cause` -|`string` - User defined reason for dry-run creating the new template for simulation purposes - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - New index template definition to be simulated, if no index template name is specified - -|=== - -=== indices.split - -[source,ts] ----- -client.indices.split({ - index: string, - target: string, - timeout: string, - master_timeout: string, - wait_for_active_shards: string, - body: object -}) ----- -link:{ref}/indices-split-index.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the source index to split - -|`target` -|`string` - The name of the target index to split into - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Set the number of active shards to wait for on the split index before the operation returns. - -|`body` -|`object` - The configuration for the target index (`settings` and `aliases`) - -|=== - -=== indices.stats - -[source,ts] ----- -client.indices.stats({ - metric: string | string[], - index: string | string[], - completion_fields: string | string[], - fielddata_fields: string | string[], - fields: string | string[], - groups: string | string[], - level: 'cluster' | 'indices' | 'shards', - types: string | string[], - include_segment_file_sizes: boolean, - include_unloaded_segments: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - forbid_closed_indices: boolean -}) ----- -link:{ref}/indices-stats.html[Documentation] + -[cols=2*] -|=== -|`metric` -|`string \| string[]` - Limit the information returned to the specific metrics.
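A hedged sketch of the `indices.shrink` flow documented above (the `indices.stats` table continues below), assuming the usual prerequisite that the source index is first blocked for writes; all index names are illustrative.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  // Shrinking requires a write block on the source index first.
  await client.indices.putSettings({
    index: 'logs-2023',
    body: { 'index.blocks.write': true }
  })
  const { body } = await client.indices.shrink({
    index: 'logs-2023',
    target: 'logs-2023-shrunk', // illustrative target name
    body: { settings: { 'index.number_of_shards': 1 } }
  })
  console.log(body)
}

run().catch(console.log)
----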
- -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`completion_fields` or `completionFields` -|`string \| string[]` - A comma-separated list of fields for `fielddata` and `suggest` index metric (supports wildcards) - -|`fielddata_fields` or `fielddataFields` -|`string \| string[]` - A comma-separated list of fields for `fielddata` index metric (supports wildcards) - -|`fields` -|`string \| string[]` - A comma-separated list of fields for `fielddata` and `completion` index metric (supports wildcards) - -|`groups` -|`string \| string[]` - A comma-separated list of search groups for `search` index metric - -|`level` -|`'cluster' \| 'indices' \| 'shards'` - Return stats aggregated at cluster, index or shard level + -_Default:_ `indices` - -|`types` -|`string \| string[]` - A comma-separated list of document types for the `indexing` index metric - -|`include_segment_file_sizes` or `includeSegmentFileSizes` -|`boolean` - Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested) - -|`include_unloaded_segments` or `includeUnloadedSegments` -|`boolean` - If set to true segment stats will include stats for segments that are not currently loaded into memory - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`forbid_closed_indices` or `forbidClosedIndices` -|`boolean` - If set to false stats will also be collected from closed indices if explicitly specified or if expand_wildcards expands to closed indices + -_Default:_ `true` - -|=== - -=== indices.updateAliases - -[source,ts] ----- -client.indices.updateAliases({ - timeout: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/indices-aliases.html[Documentation] + -[cols=2*] -|=== -|`timeout` -|`string` - Request timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The definition of `actions` to perform - -|=== - -=== indices.upgrade - -[source,ts] ----- -client.indices.upgrade({ - index: string | string[], - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - ignore_unavailable: boolean, - wait_for_completion: boolean, - only_ancient_segments: boolean -}) ----- -link:{ref}/indices-upgrade.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both.
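For the `indices.updateAliases` call documented above, a minimal sketch of an atomic alias swap (the `indices.upgrade` table resumes below); index and alias names are made up for illustration.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  // Both actions are applied atomically, so readers of the alias
  // never see an intermediate state.
  const { body } = await client.indices.updateAliases({
    body: {
      actions: [
        { remove: { index: 'logs-2022', alias: 'logs-current' } },
        { add: { index: 'logs-2023', alias: 'logs-current' } }
      ]
    }
  })
  console.log(body)
}

run().catch(console.log)
----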
+ -_Default:_ `open` - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Specify whether the request should block until all segments are upgraded (default: false) - -|`only_ancient_segments` or `onlyAncientSegments` -|`boolean` - If true, only ancient (an older Lucene major release) segments will be upgraded - -|=== - -=== indices.validateQuery - -[source,ts] ----- -client.indices.validateQuery({ - index: string | string[], - type: string | string[], - explain: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - q: string, - analyzer: string, - analyze_wildcard: boolean, - default_operator: 'AND' | 'OR', - df: string, - lenient: boolean, - rewrite: boolean, - all_shards: boolean, - body: object -}) ----- -link:{ref}/search-validate.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices - -|`type` -|`string \| string[]` - A comma-separated list of document types to restrict the operation; leave empty to perform the operation on all types + - -WARNING: This parameter has been deprecated. - -|`explain` -|`boolean` - Return detailed information about the error - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`q` -|`string` - Query in the Lucene query string syntax - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`rewrite` -|`boolean` - Provide a more detailed explanation showing the actual Lucene query that will be executed.
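A sketch of `indices.validateQuery` using the `q` and `explain` parameters from the table above (the table's last rows continue below); the index name and query string are illustrative.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  const { body } = await client.indices.validateQuery({
    index: 'logs-2023',
    explain: true,
    q: 'status:active AND response_time:>100' // Lucene query string syntax
  })
  // body.valid is false if the query cannot be parsed;
  // with explain: true, body.explanations carries the details.
  console.log(body.valid)
}

run().catch(console.log)
----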
- -|`all_shards` or `allShards` -|`boolean` - Execute validation on all shards instead of one random shard per index - -|`body` -|`object` - The query definition specified with the Query DSL - -|=== - -=== info - -[source,ts] ----- -client.info() ----- -link:{ref}/index.html[Documentation] + - - -=== ingest.deletePipeline - -[source,ts] ----- -client.ingest.deletePipeline({ - id: string, - master_timeout: string, - timeout: string -}) ----- -link:{ref}/delete-pipeline-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Pipeline ID - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== ingest.getPipeline - -[source,ts] ----- -client.ingest.getPipeline({ - id: string, - master_timeout: string -}) ----- -link:{ref}/get-pipeline-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Comma separated list of pipeline ids. Wildcards supported - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|=== - -=== ingest.processorGrok - -[source,ts] ----- -client.ingest.processorGrok() ----- -link:{ref}/grok-processor.html#grok-processor-rest-get[Documentation] + - - -=== ingest.putPipeline - -[source,ts] ----- -client.ingest.putPipeline({ - id: string, - master_timeout: string, - timeout: string, - body: object -}) ----- -link:{ref}/put-pipeline-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Pipeline ID - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`body` -|`object` - The ingest definition - -|=== - -=== ingest.simulate - -[source,ts] ----- -client.ingest.simulate({ - id: string, - verbose: boolean, - body: object -}) ----- -link:{ref}/simulate-pipeline-api.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Pipeline ID - -|`verbose` -|`boolean` - Verbose mode. 
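Tying the `ingest.putPipeline` call above together, a small sketch that registers a pipeline (the `ingest.simulate` parameter descriptions resume below); the pipeline ID and processor are illustrative.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  const { body } = await client.ingest.putPipeline({
    id: 'add-ingest-timestamp', // illustrative pipeline ID
    body: {
      description: 'stamp documents at ingest time',
      processors: [
        { set: { field: 'ingested_at', value: '{{_ingest.timestamp}}' } }
      ]
    }
  })
  console.log(body)
}

run().catch(console.log)
----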
Display data output for each processor in executed pipeline - -|`body` -|`object` - The simulate definition - -|=== - -=== mget - -[source,ts] ----- -client.mget({ - index: string, - stored_fields: string | string[], - preference: string, - realtime: boolean, - refresh: boolean, - routing: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - body: object -}) ----- -link:{ref}/docs-multi-get.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return in the response - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`realtime` -|`boolean` - Specify whether to perform the operation in realtime or search mode - -|`refresh` -|`boolean` - Refresh the shard containing the document before performing the operation - -|`routing` -|`string` - Specific routing value - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`body` -|`object` - Document identifiers; can be either `docs` (containing full document information) or `ids` (when index is provided in the URL). - -|=== - -=== msearch - -[source,ts] ----- -client.msearch({ - index: string | string[], - search_type: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch', - max_concurrent_searches: number, - typed_keys: boolean, - pre_filter_shard_size: number, - max_concurrent_shard_requests: number, - rest_total_hits_as_int: boolean, - ccs_minimize_roundtrips: boolean, - body: object -}) ----- -link:{ref}/search-multi-search.html[Documentation] + -{jsclient}/msearch_examples.html[Code Example] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to use as default - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'query_and_fetch' \| 'dfs_query_then_fetch' \| 'dfs_query_and_fetch'` - Search operation type - -|`max_concurrent_searches` or `maxConcurrentSearches` -|`number` - Controls the maximum number of concurrent searches the multi search api will execute - -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|`pre_filter_shard_size` or `preFilterShardSize` -|`number` - A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard cannot match any documents based on its rewrite method i.e. if date filters are mandatory to match but the shard bounds and the query are disjoint. - -|`max_concurrent_shard_requests` or `maxConcurrentShardRequests` -|`number` - The number of concurrent shard requests each sub search executes concurrently per node.
This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests + -_Default:_ `5` - -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response - -|`ccs_minimize_roundtrips` or `ccsMinimizeRoundtrips` -|`boolean` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution + -_Default:_ `true` - -|`body` -|`object` - The request definitions (metadata-search request definition pairs), separated by newlines - -|=== - -=== msearchTemplate - -[source,ts] ----- -client.msearchTemplate({ - index: string | string[], - search_type: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch', - typed_keys: boolean, - max_concurrent_searches: number, - rest_total_hits_as_int: boolean, - ccs_minimize_roundtrips: boolean, - body: object -}) ----- -link:{ref}/search-multi-search.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to use as default - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'query_and_fetch' \| 'dfs_query_then_fetch' \| 'dfs_query_and_fetch'` - Search operation type - -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|`max_concurrent_searches` or `maxConcurrentSearches` -|`number` - Controls the maximum number of concurrent searches the multi search api will execute - -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response - -|`ccs_minimize_roundtrips` or `ccsMinimizeRoundtrips` -|`boolean` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution + -_Default:_ `true` - -|`body` -|`object` - The request definitions (metadata-search request definition pairs), separated by newlines - -|=== - -=== mtermvectors - -[source,ts] ----- -client.mtermvectors({ - index: string, - ids: string | string[], - term_statistics: boolean, - field_statistics: boolean, - fields: string | string[], - offsets: boolean, - positions: boolean, - payloads: boolean, - preference: string, - routing: string, - realtime: boolean, - version: number, - version_type: 'internal' | 'external' | 'external_gte', - body: object -}) ----- -link:{ref}/docs-multi-termvectors.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The index in which the document resides. - -|`ids` -|`string \| string[]` - A comma-separated list of documents ids. You must define ids as parameter or set "ids" or "docs" in the request body - -|`term_statistics` or `termStatistics` -|`boolean` - Specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". - -|`field_statistics` or `fieldStatistics` -|`boolean` - Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". + -_Default:_ `true` - -|`fields` -|`string \| string[]` - A comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs". 
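For the `msearch` body described above (metadata/search-definition pairs), this client conventionally accepts an array that it serializes as newline-delimited JSON; a hedged sketch, with index names assumed.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  const { body } = await client.msearch({
    body: [
      { index: 'logs-2023' },                     // header line
      { query: { match: { message: 'error' } } }, // search definition
      { index: 'metrics-2023' },                  // header line
      { query: { match_all: {} } }                // search definition
    ]
  })
  console.log(body.responses) // one result per search, in order
}

run().catch(console.log)
----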
- -|`offsets` -|`boolean` - Specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". + -_Default:_ `true` - -|`positions` -|`boolean` - Specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". + -_Default:_ `true` - -|`payloads` -|`boolean` - Specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". + -_Default:_ `true` - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random). Applies to all returned documents unless otherwise specified in body "params" or "docs". - -|`routing` -|`string` - Specific routing value. Applies to all returned documents unless otherwise specified in body "params" or "docs". - -|`realtime` -|`boolean` - Specifies if requests are real-time as opposed to near-real-time (default: true). - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|`body` -|`object` - Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation. - -|=== - -=== nodes.hotThreads - -[source,ts] ----- -client.nodes.hotThreads({ - node_id: string | string[], - interval: string, - snapshots: number, - threads: number, - ignore_idle_threads: boolean, - type: 'cpu' | 'wait' | 'block', - timeout: string -}) ----- -link:{ref}/cluster-nodes-hot-threads.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`interval` -|`string` - The interval for the second sampling of threads - -|`snapshots` -|`number` - Number of samples of thread stacktrace (default: 10) - -|`threads` -|`number` - Specify the number of threads to provide information for (default: 3) - -|`ignore_idle_threads` or `ignoreIdleThreads` -|`boolean` - Don't show threads that are in known-idle places, such as waiting on a socket select or pulling from an empty task queue (default: true) - -|`type` -|`'cpu' \| 'wait' \| 'block'` - The type to sample (default: cpu) - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== nodes.info - -[source,ts] ----- -client.nodes.info({ - node_id: string | string[], - metric: string | string[], - flat_settings: boolean, - timeout: string -}) ----- -link:{ref}/cluster-nodes-info.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`metric` -|`string \| string[]` - A comma-separated list of metrics you wish returned. Leave empty to return all.
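A brief sketch of the `mtermvectors` call documented above, requesting term statistics for two documents (the `nodes.info` table continues below); the index, ids, and field are assumptions.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  const { body } = await client.mtermvectors({
    index: 'articles', // illustrative index
    ids: ['1', '2'],
    fields: ['title'],
    term_statistics: true
  })
  console.log(body.docs) // one term-vector entry per requested document
}

run().catch(console.log)
----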
- -|`flat_settings` or `flatSettings` -|`boolean` - Return settings in flat format (default: false) - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== nodes.reloadSecureSettings - -[source,ts] ----- -client.nodes.reloadSecureSettings({ - node_id: string | string[], - timeout: string, - body: object -}) ----- -link:{ref}/secure-settings.html#reloadable-secure-settings[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs to span the reload/reinit call. Should stay empty because reloading usually involves all cluster nodes. - -|`timeout` -|`string` - Explicit operation timeout - -|`body` -|`object` - An object containing the password for the elasticsearch keystore - -|=== - -=== nodes.stats - -[source,ts] ----- -client.nodes.stats({ - node_id: string | string[], - metric: string | string[], - index_metric: string | string[], - completion_fields: string | string[], - fielddata_fields: string | string[], - fields: string | string[], - groups: boolean, - level: 'indices' | 'node' | 'shards', - types: string | string[], - timeout: string, - include_segment_file_sizes: boolean -}) ----- -link:{ref}/cluster-nodes-stats.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`metric` -|`string \| string[]` - Limit the information returned to the specified metrics - -|`index_metric` or `indexMetric` -|`string \| string[]` - Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified. 
- -|`completion_fields` or `completionFields` -|`string \| string[]` - A comma-separated list of fields for `fielddata` and `suggest` index metric (supports wildcards) - -|`fielddata_fields` or `fielddataFields` -|`string \| string[]` - A comma-separated list of fields for `fielddata` index metric (supports wildcards) - -|`fields` -|`string \| string[]` - A comma-separated list of fields for `fielddata` and `completion` index metric (supports wildcards) - -|`groups` -|`boolean` - A comma-separated list of search groups for `search` index metric - -|`level` -|`'indices' \| 'node' \| 'shards'` - Return indices stats aggregated at index, node or shard level + -_Default:_ `node` - -|`types` -|`string \| string[]` - A comma-separated list of document types for the `indexing` index metric - -|`timeout` -|`string` - Explicit operation timeout - -|`include_segment_file_sizes` or `includeSegmentFileSizes` -|`boolean` - Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested) - -|=== - -=== nodes.usage - -[source,ts] ----- -client.nodes.usage({ - node_id: string | string[], - metric: string | string[], - timeout: string -}) ----- -link:{ref}/cluster-nodes-usage.html[Documentation] + -[cols=2*] -|=== -|`node_id` or `nodeId` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`metric` -|`string \| string[]` - Limit the information returned to the specified metrics - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== ping - -[source,ts] ----- -client.ping() ----- -link:{ref}/index.html[Documentation] + - - -=== putScript - -[source,ts] ----- -client.putScript({ - id: string, - context: string, - timeout: string, - master_timeout: string, - body: object -}) ----- -link:{ref}/modules-scripting.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Script ID - -|`context` -|`string` - Script context - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`body` -|`object` - The document - -|=== - -=== rankEval -*Stability:* experimental -[source,ts] ----- -client.rankEval({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - body: object -}) ----- -link:{ref}/search-rank-eval.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. 
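To illustrate the `putScript` call above, a sketch that stores a Painless script under an ID (the `rankEval` table resumes below); the script itself is a made-up example.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  // The stored script can later be referenced by its ID in queries.
  const { body } = await client.putScript({
    id: 'price-with-factor', // illustrative script ID
    body: {
      script: {
        lang: 'painless',
        source: "doc['price'].value * params.factor"
      }
    }
  })
  console.log(body)
}

run().catch(console.log)
----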
+ -_Default:_ `open` - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type - -|`body` -|`object` - The ranking evaluation search definition, including search requests, document ratings and ranking metric definition. - -|=== - -=== reindex - -[source,ts] ----- -client.reindex({ - refresh: boolean, - timeout: string, - wait_for_active_shards: string, - wait_for_completion: boolean, - requests_per_second: number, - scroll: string, - slices: number|string, - max_docs: number, - body: object -}) ----- -link:{ref}/docs-reindex.html[Documentation] + -{jsclient}/reindex_examples.html[Code Example] + -[cols=2*] -|=== -|`refresh` -|`boolean` - Should the affected indexes be refreshed? - -|`timeout` -|`string` - Time each individual bulk request should wait for shards that are unavailable. + -_Default:_ `1m` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the reindex operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Whether the request should block until the reindex is complete. + -_Default:_ `true` - -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in sub-requests per second. -1 means no throttle. - -|`scroll` -|`string` - Control how long to keep the search context alive + -_Default:_ `5m` - -|`slices` -|`number\|string` - The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`. + -_Default:_ `1` - -|`max_docs` or `maxDocs` -|`number` - Maximum number of documents to process (default: all documents) - -|`body` -|`object` - The search definition using the Query DSL and the prototype for the index request. - -|=== - -=== reindexRethrottle - -[source,ts] ----- -client.reindexRethrottle({ - task_id: string, - requests_per_second: number -}) ----- -link:{ref}/docs-reindex.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - The task id to rethrottle - -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in floating sub-requests per second. -1 means set no throttle. - -|=== - -=== renderSearchTemplate - -[source,ts] ----- -client.renderSearchTemplate({ - id: string, - body: object -}) ----- -link:{ref}/search-template.html#_validating_templates[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The id of the stored search template - -|`body` -|`object` - The search definition template and its params - -|=== - -=== scriptsPainlessExecute -*Stability:* experimental -[source,ts] ----- -client.scriptsPainlessExecute({ - body: object -}) ----- -link:https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The script to execute - -|=== - -=== scroll - -[source,ts] ----- -client.scroll({ - scroll_id: string, - scroll: string, - rest_total_hits_as_int: boolean, - body: object -}) ----- -link:{ref}/search-request-body.html#request-body-search-scroll[Documentation] + -{jsclient}/scroll_examples.html[Code Example] + -[cols=2*] -|=== -|`scroll_id` or `scrollId` -|`string` - The scroll ID + - -WARNING: This parameter has been deprecated.
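A sketch of the `reindex` call documented above, run asynchronously as a task so it can be inspected or rethrottled later (the `scroll` table continues below); index names are illustrative.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  const { body } = await client.reindex({
    wait_for_completion: false, // return immediately with a task ID
    body: {
      source: { index: 'logs-2022' },
      dest: { index: 'logs-2022-v2' }
    }
  })
  // body.task can be passed to tasks.get or reindexRethrottle
  console.log(body.task)
}

run().catch(console.log)
----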
- -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search - -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response - -|`body` -|`object` - The scroll ID if not passed by URL or query parameter. - -|=== - -=== search - -[source,ts] ----- -client.search({ - index: string | string[], - analyzer: string, - analyze_wildcard: boolean, - ccs_minimize_roundtrips: boolean, - default_operator: 'AND' | 'OR', - df: string, - explain: boolean, - stored_fields: string | string[], - docvalue_fields: string | string[], - from: number, - ignore_unavailable: boolean, - ignore_throttled: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - lenient: boolean, - preference: string, - q: string, - routing: string | string[], - scroll: string, - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - size: number, - sort: string | string[], - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - terminate_after: number, - stats: string | string[], - suggest_field: string, - suggest_mode: 'missing' | 'popular' | 'always', - suggest_size: number, - suggest_text: string, - timeout: string, - track_scores: boolean, - track_total_hits: boolean|number, - allow_partial_search_results: boolean, - typed_keys: boolean, - version: boolean, - seq_no_primary_term: boolean, - request_cache: boolean, - batched_reduce_size: number, - max_concurrent_shard_requests: number, - pre_filter_shard_size: number, - rest_total_hits_as_int: boolean, - body: object -}) ----- -link:{ref}/search-search.html[Documentation] + -{jsclient}/search_examples.html[Code Example] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`ccs_minimize_roundtrips` or `ccsMinimizeRoundtrips` -|`boolean` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution + -_Default:_ `true` - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`explain` -|`boolean` - Specify whether to return detailed information about score computation as part of a hit - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return as part of a hit - -|`docvalue_fields` or `docvalueFields` -|`string \| string[]` - A comma-separated list of fields to return as the docvalue representation of a field for each hit - -|`from` -|`number` - Starting offset (default: 0) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Whether specified concrete, expanded or aliased indices should be ignored when throttled - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type - -|`size` -|`number` - Number of hits to return (default: 10) - -|`sort` -|`string \| string[]` - A comma-separated list of `<field>:<direction>` pairs - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`terminate_after` or `terminateAfter` -|`number` - The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. - -|`stats` -|`string \| string[]` - Specific 'tag' of the request for logging and statistical purposes - -|`suggest_field` or `suggestField` -|`string` - Specify which field to use for suggestions - -|`suggest_mode` or `suggestMode` -|`'missing' \| 'popular' \| 'always'` - Specify suggest mode + -_Default:_ `missing` - -|`suggest_size` or `suggestSize` -|`number` - How many suggestions to return in response - -|`suggest_text` or `suggestText` -|`string` - The source text for which the suggestions should be returned - -|`timeout` -|`string` - Explicit operation timeout - -|`track_scores` or `trackScores` -|`boolean` - Whether to calculate and return scores even if they are not used for sorting - -|`track_total_hits` or `trackTotalHits` -|`boolean\|number` - Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number. - -|`allow_partial_search_results` or `allowPartialSearchResults` -|`boolean` - Indicate if an error should be returned if there is a partial search failure or timeout + -_Default:_ `true` - -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|`version` -|`boolean` - Specify whether to return document version as part of a hit - -|`seq_no_primary_term` or `seqNoPrimaryTerm` -|`boolean` - Specify whether to return sequence number and primary term of the last modification of each hit - -|`request_cache` or `requestCache` -|`boolean` - Specify if request cache should be used for this request or not, defaults to index level setting - -|`batched_reduce_size` or `batchedReduceSize` -|`number` - The number of shard results that should be reduced at once on the coordinating node.
This value should be used as a protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. + -_Default:_ `512` - -|`max_concurrent_shard_requests` or `maxConcurrentShardRequests` -|`number` - The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests + -_Default:_ `5` - -|`pre_filter_shard_size` or `preFilterShardSize` -|`number` - A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard cannot match any documents based on its rewrite method i.e. if date filters are mandatory to match but the shard bounds and the query are disjoint. - -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response - -|`body` -|`object` - The search definition using the Query DSL - -|=== - -=== searchShards - -[source,ts] ----- -client.searchShards({ - index: string | string[], - preference: string, - routing: string, - local: boolean, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ----- -link:{ref}/search-shards.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`routing` -|`string` - Specific routing value - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both.
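Putting the `search` parameters above to use, a minimal query sketch (the `searchShards` table continues below); the index, sort field, and query are illustrative.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  const { body } = await client.search({
    index: 'logs-2023',
    size: 5,
    sort: '@timestamp:desc', // <field>:<direction> pair
    body: {
      query: { match: { message: 'timeout' } }
    }
  })
  console.log(body.hits.hits)
}

run().catch(console.log)
----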
+ -_Default:_ `open` - -|=== - -=== searchTemplate - -[source,ts] ----- -client.searchTemplate({ - index: string | string[], - ignore_unavailable: boolean, - ignore_throttled: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - preference: string, - routing: string | string[], - scroll: string, - search_type: 'query_then_fetch' | 'query_and_fetch' | 'dfs_query_then_fetch' | 'dfs_query_and_fetch', - explain: boolean, - profile: boolean, - typed_keys: boolean, - rest_total_hits_as_int: boolean, - ccs_minimize_roundtrips: boolean, - body: object -}) ----- -link:{ref}/search-template.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Whether specified concrete, expanded or aliased indices should be ignored when throttled - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'query_and_fetch' \| 'dfs_query_then_fetch' \| 'dfs_query_and_fetch'` - Search operation type - -|`explain` -|`boolean` - Specify whether to return detailed information about score computation as part of a hit - -|`profile` -|`boolean` - Specify whether to profile the query execution - -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response - -|`ccs_minimize_roundtrips` or `ccsMinimizeRoundtrips` -|`boolean` - Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution + -_Default:_ `true` - -|`body` -|`object` - The search definition template and its params - -|=== - -=== snapshot.cleanupRepository - -[source,ts] ----- -client.snapshot.cleanupRepository({ - repository: string, - master_timeout: string, - timeout: string -}) ----- -link:{ref}/clean-up-snapshot-repo-api.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== snapshot.create - -[source,ts] ----- -client.snapshot.create({ - repository: string, - snapshot: string, - master_timeout: string, - wait_for_completion: boolean, - body: object -}) ----- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== 
-|`repository` -|`string` - A repository name - -|`snapshot` -|`string` - A snapshot name - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should this request wait until the operation has completed before returning - -|`body` -|`object` - The snapshot definition - -|=== - -=== snapshot.createRepository - -[source,ts] ----- -client.snapshot.createRepository({ - repository: string, - master_timeout: string, - timeout: string, - verify: boolean, - body: object -}) ----- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|`verify` -|`boolean` - Whether to verify the repository after creation - -|`body` -|`object` - The repository definition - -|=== - -=== snapshot.delete - -[source,ts] ----- -client.snapshot.delete({ - repository: string, - snapshot: string | string[], - master_timeout: string -}) ----- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string \| string[]` - A comma-separated list of snapshot names - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|=== - -=== snapshot.deleteRepository - -[source,ts] ----- -client.snapshot.deleteRepository({ - repository: string | string[], - master_timeout: string, - timeout: string -}) ----- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string \| string[]` - Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. 
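A combined sketch of the `snapshot.createRepository` and `snapshot.create` calls from the sections above (the `snapshot.deleteRepository` table resumes below). The repository name and filesystem location are assumptions, and an `fs` repository requires `path.repo` to be configured on the nodes.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  await client.snapshot.createRepository({
    repository: 'my-backups', // illustrative repository name
    verify: true,
    body: { type: 'fs', settings: { location: '/mnt/backups' } }
  })
  const { body } = await client.snapshot.create({
    repository: 'my-backups',
    snapshot: 'snapshot-2023-03-28', // illustrative snapshot name
    wait_for_completion: true,
    body: { indices: 'logs-*' }
  })
  console.log(body.snapshot)
}

run().catch(console.log)
----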
- -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== snapshot.get - -[source,ts] ----- -client.snapshot.get({ - repository: string, - snapshot: string | string[], - master_timeout: string, - ignore_unavailable: boolean, - verbose: boolean -}) ----- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string \| string[]` - A comma-separated list of snapshot names - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown - -|`verbose` -|`boolean` - Whether to show verbose snapshot info or only show the basic info found in the repository index blob - -|=== - -=== snapshot.getRepository - -[source,ts] ----- -client.snapshot.getRepository({ - repository: string | string[], - master_timeout: string, - local: boolean -}) ----- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string \| string[]` - A comma-separated list of repository names - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|=== - -=== snapshot.restore - -[source,ts] ----- -client.snapshot.restore({ - repository: string, - snapshot: string, - master_timeout: string, - wait_for_completion: boolean, - body: object -}) ----- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string` - A snapshot name - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should this request wait until the operation has completed before returning - -|`body` -|`object` - Details of what to restore - -|=== - -=== snapshot.status - -[source,ts] ----- -client.snapshot.status({ - repository: string, - snapshot: string | string[], - master_timeout: string, - ignore_unavailable: boolean -}) ----- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`snapshot` -|`string \| string[]` - A comma-separated list of snapshot names - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown - -|=== - -=== snapshot.verifyRepository - -[source,ts] ----- -client.snapshot.verifyRepository({ - repository: string, - master_timeout: string, - timeout: string -}) ----- -link:{ref}/modules-snapshots.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - A repository name - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== tasks.cancel - -[source,ts] ----- -client.tasks.cancel({ - task_id: string, - nodes: string | string[], - actions: string | string[], - parent_task_id: string, - wait_for_completion: boolean -}) ----- 
-link:{ref}/tasks.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - Cancel the task with specified task id (node_id:task_number) - -|`nodes` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`actions` -|`string \| string[]` - A comma-separated list of actions that should be cancelled. Leave empty to cancel all. - -|`parent_task_id` or `parentTaskId` -|`string` - Cancel tasks with specified parent task id (node_id:task_number). Set to -1 to cancel all. - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should the request block until the cancellation of the task and its descendant tasks is completed. Defaults to false - -|=== - -=== tasks.get - -[source,ts] ----- -client.tasks.get({ - task_id: string, - wait_for_completion: boolean, - timeout: string -}) ----- -link:{ref}/tasks.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - Return the task with specified id (node_id:task_number) - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Wait for the matching tasks to complete (default: false) - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== tasks.list - -[source,ts] ----- -client.tasks.list({ - nodes: string | string[], - actions: string | string[], - detailed: boolean, - parent_task_id: string, - wait_for_completion: boolean, - group_by: 'nodes' | 'parents' | 'none', - timeout: string -}) ----- -link:{ref}/tasks.html[Documentation] + -[cols=2*] -|=== -|`nodes` -|`string \| string[]` - A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes - -|`actions` -|`string \| string[]` - A comma-separated list of actions that should be returned. Leave empty to return all. - -|`detailed` -|`boolean` - Return detailed task information (default: false) - -|`parent_task_id` or `parentTaskId` -|`string` - Return tasks with specified parent task id (node_id:task_number). Set to -1 to return all. - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Wait for the matching tasks to complete (default: false) - -|`group_by` or `groupBy` -|`'nodes' \| 'parents' \| 'none'` - Group tasks by nodes or parent/child relationships + -_Default:_ `nodes` - -|`timeout` -|`string` - Explicit operation timeout - -|=== - -=== termvectors - -[source,ts] ----- -client.termvectors({ - index: string, - id: string, - term_statistics: boolean, - field_statistics: boolean, - fields: string | string[], - offsets: boolean, - positions: boolean, - payloads: boolean, - preference: string, - routing: string, - realtime: boolean, - version: number, - version_type: 'internal' | 'external' | 'external_gte', - body: object -}) ----- -link:{ref}/docs-termvectors.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The index in which the document resides. - -|`id` -|`string` - The id of the document, when not specified a doc param should be supplied. - -|`term_statistics` or `termStatistics` -|`boolean` - Specifies if total term frequency and document frequency should be returned. - -|`field_statistics` or `fieldStatistics` -|`boolean` - Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. 
+ -_Default:_ `true` - -|`fields` -|`string \| string[]` - A comma-separated list of fields to return. - -|`offsets` -|`boolean` - Specifies if term offsets should be returned. + -_Default:_ `true` - -|`positions` -|`boolean` - Specifies if term positions should be returned. + -_Default:_ `true` - -|`payloads` -|`boolean` - Specifies if term payloads should be returned. + -_Default:_ `true` - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random). - -|`routing` -|`string` - Specific routing value. - -|`realtime` -|`boolean` - Specifies if request is real-time as opposed to near-real-time (default: true). - -|`version` -|`number` - Explicit version number for concurrency control - -|`version_type` or `versionType` -|`'internal' \| 'external' \| 'external_gte'` - Specific version type - -|`body` -|`object` - Define parameters and/or supply a document to get termvectors for. See documentation. - -|=== - -=== update - -[source,ts] ----- -client.update({ - id: string, - index: string, - type: string, - wait_for_active_shards: string, - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - lang: string, - refresh: 'true' | 'false' | 'wait_for', - retry_on_conflict: number, - routing: string, - timeout: string, - if_seq_no: number, - if_primary_term: number, - body: object -}) ----- -link:{ref}/docs-update.html[Documentation] + -{jsclient}/update_examples.html[Code Example] + -[cols=2*] -|=== -|`id` -|`string` - Document ID - -|`index` -|`string` - The name of the index - -|`type` -|`string` - The type of the document + - -WARNING: This parameter has been deprecated. - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the update operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`lang` -|`string` - The script language (default: painless) - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` (the default) then do nothing with refreshes.
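For the `termvectors` call documented above, a short sketch requesting statistics for a single document (the `update` table continues below); the index, id, and field are assumptions.

[source,ts]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' }) // hypothetical local node

async function run () {
  const { body } = await client.termvectors({
    index: 'articles', // illustrative index
    id: '1',
    fields: ['title'],
    term_statistics: true,
    field_statistics: true
  })
  console.log(body.term_vectors)
}

run().catch(console.log)
----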
- -|`retry_on_conflict` or `retryOnConflict` -|`number` - Specify how many times should the operation be retried when a conflict occurs (default: 0) - -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`if_seq_no` or `ifSeqNo` -|`number` - only perform the update operation if the last operation that has changed the document has the specified sequence number - -|`if_primary_term` or `ifPrimaryTerm` -|`number` - only perform the update operation if the last operation that has changed the document has the specified primary term - -|`body` -|`object` - The request definition requires either `script` or partial `doc` - -|=== - -=== updateByQuery - -[source,ts] ----- -client.updateByQuery({ - index: string | string[], - analyzer: string, - analyze_wildcard: boolean, - default_operator: 'AND' | 'OR', - df: string, - from: number, - ignore_unavailable: boolean, - allow_no_indices: boolean, - conflicts: 'abort' | 'proceed', - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - lenient: boolean, - pipeline: string, - preference: string, - q: string, - routing: string | string[], - scroll: string, - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - search_timeout: string, - max_docs: number, - sort: string | string[], - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - terminate_after: number, - stats: string | string[], - version: boolean, - version_type: boolean, - request_cache: boolean, - refresh: boolean, - timeout: string, - wait_for_active_shards: string, - scroll_size: number, - wait_for_completion: boolean, - requests_per_second: number, - slices: number|string, - body: object -}) ----- -link:{ref}/docs-update-by-query.html[Documentation] + -{jsclient}/update_by_query_examples.html[Code Example] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`from` -|`number` - Starting offset (default: 0) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`conflicts` -|`'abort' \| 'proceed'` - What to do when the update by query hits version conflicts? + -_Default:_ `abort` - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`pipeline` -|`string` - Ingest pipeline to set on index requests made by this action. 
(default: none) - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`scroll` -|`string` - Specify how long a consistent view of the index should be maintained for scrolled search - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type - -|`search_timeout` or `searchTimeout` -|`string` - Explicit timeout for each search request. Defaults to no timeout. - -|`max_docs` or `maxDocs` -|`number` - Maximum number of documents to process (default: all documents) - -|`sort` -|`string \| string[]` - A comma-separated list of : pairs - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`terminate_after` or `terminateAfter` -|`number` - The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. - -|`stats` -|`string \| string[]` - Specific 'tag' of the request for logging and statistical purposes - -|`version` -|`boolean` - Specify whether to return document version as part of a hit - -|`version_type` or `versionType` -|`boolean` - Should the document increment the version number (internal) on hit or not (reindex) - -|`request_cache` or `requestCache` -|`boolean` - Specify if request cache should be used for this request or not, defaults to index level setting - -|`refresh` -|`boolean` - Should the affected indexes be refreshed? - -|`timeout` -|`string` - Time each individual bulk request should wait for shards that are unavailable. + -_Default:_ `1m` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before proceeding with the update by query operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) - -|`scroll_size` or `scrollSize` -|`number` - Size on the scroll request powering the update by query + -_Default:_ `100` - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Whether the request should block until the update by query operation is complete. + -_Default:_ `true` - -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in sub-requests per second. -1 means no throttle. - -|`slices` -|`number\|string` - The number of slices this task should be divided into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be set to `auto`. + -_Default:_ `1` - -|`body` -|`object` - The search definition using the Query DSL - -|=== - -=== updateByQueryRethrottle - -[source,ts] ---- -client.updateByQueryRethrottle({ - task_id: string, - requests_per_second: number -}) ---- -link:{ref}/docs-update-by-query.html[Documentation] + -[cols=2*] -|=== -|`task_id` or `taskId` -|`string` - The task id to rethrottle - -|`requests_per_second` or `requestsPerSecond` -|`number` - The throttle to set on this request in floating sub-requests per second. -1 means set no throttle.
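A hedged usage sketch for `updateByQuery`, combining a query with a painless script; the index, field, and query values are hypothetical:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Increment a counter on every matching document; conflicts: 'proceed'
  // skips version conflicts instead of aborting the whole operation.
  const { body } = await client.updateByQuery({
    index: 'my-index', // hypothetical index
    conflicts: 'proceed',
    body: {
      query: { match: { status: 'active' } },
      script: { source: 'ctx._source.views += 1', lang: 'painless' }
    }
  })
  console.log(body.updated) // number of documents updated
}

run().catch(console.log)
----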
- -|=== - -=== asyncSearch.delete - -[source,ts] ----- -client.asyncSearch.delete({ - id: string -}) ----- -link:{ref}/async-search.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID - -|=== - -=== asyncSearch.get - -[source,ts] ----- -client.asyncSearch.get({ - id: string, - wait_for_completion_timeout: string, - keep_alive: string, - typed_keys: boolean -}) ----- -link:{ref}/async-search.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The async search ID - -|`wait_for_completion_timeout` or `waitForCompletionTimeout` -|`string` - Specify the time that the request should block waiting for the final response - -|`keep_alive` or `keepAlive` -|`string` - Specify the time interval in which the results (partial or final) for this search will be available - -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|=== - -=== asyncSearch.submit - -[source,ts] ----- -client.asyncSearch.submit({ - index: string | string[], - wait_for_completion_timeout: string, - keep_on_completion: boolean, - keep_alive: string, - batched_reduce_size: number, - request_cache: boolean, - analyzer: string, - analyze_wildcard: boolean, - default_operator: 'AND' | 'OR', - df: string, - explain: boolean, - stored_fields: string | string[], - docvalue_fields: string | string[], - from: number, - ignore_unavailable: boolean, - ignore_throttled: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - lenient: boolean, - preference: string, - q: string, - routing: string | string[], - search_type: 'query_then_fetch' | 'dfs_query_then_fetch', - size: number, - sort: string | string[], - _source: string | string[], - _source_excludes: string | string[], - _source_includes: string | string[], - terminate_after: number, - stats: string | string[], - suggest_field: string, - suggest_mode: 'missing' | 'popular' | 'always', - suggest_size: number, - suggest_text: string, - timeout: string, - track_scores: boolean, - track_total_hits: boolean|long, - allow_partial_search_results: boolean, - typed_keys: boolean, - version: boolean, - seq_no_primary_term: boolean, - max_concurrent_shard_requests: number, - body: object -}) ----- -link:{ref}/async-search.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`wait_for_completion_timeout` or `waitForCompletionTimeout` -|`string` - Specify the time that the request should block waiting for the final response + -_Default:_ `1s` - -|`keep_on_completion` or `keepOnCompletion` -|`boolean` - Control whether the response should be stored in the cluster if it completed within the provided [wait_for_completion] time (default: false) - -|`keep_alive` or `keepAlive` -|`string` - Update the time interval in which the results (partial or final) for this search will be available + -_Default:_ `5d` - -|`batched_reduce_size` or `batchedReduceSize` -|`number` - The number of shard results that should be reduced at once on the coordinating node. This value should be used as the granularity at which progress results will be made available. 
+ -_Default:_ `5` - -|`request_cache` or `requestCache` -|`boolean` - Specify if request cache should be used for this request or not, defaults to true - -|`analyzer` -|`string` - The analyzer to use for the query string - -|`analyze_wildcard` or `analyzeWildcard` -|`boolean` - Specify whether wildcard and prefix queries should be analyzed (default: false) - -|`default_operator` or `defaultOperator` -|`'AND' \| 'OR'` - The default operator for query string query (AND or OR) + -_Default:_ `OR` - -|`df` -|`string` - The field to use as default where no field prefix is given in the query string - -|`explain` -|`boolean` - Specify whether to return detailed information about score computation as part of a hit - -|`stored_fields` or `storedFields` -|`string \| string[]` - A comma-separated list of stored fields to return as part of a hit - -|`docvalue_fields` or `docvalueFields` -|`string \| string[]` - A comma-separated list of fields to return as the docvalue representation of a field for each hit - -|`from` -|`number` - Starting offset (default: 0) - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Whether specified concrete, expanded or aliased indices should be ignored when throttled - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|`lenient` -|`boolean` - Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - -|`preference` -|`string` - Specify the node or shard the operation should be performed on (default: random) - -|`q` -|`string` - Query in the Lucene query string syntax - -|`routing` -|`string \| string[]` - A comma-separated list of specific routing values - -|`search_type` or `searchType` -|`'query_then_fetch' \| 'dfs_query_then_fetch'` - Search operation type - -|`size` -|`number` - Number of hits to return (default: 10) - -|`sort` -|`string \| string[]` - A comma-separated list of : pairs - -|`_source` -|`string \| string[]` - True or false to return the _source field or not, or a list of fields to return - -|`_source_excludes` or `_sourceExcludes` -|`string \| string[]` - A list of fields to exclude from the returned _source field - -|`_source_includes` or `_sourceIncludes` -|`string \| string[]` - A list of fields to extract and return from the _source field - -|`terminate_after` or `terminateAfter` -|`number` - The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. 
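To make the `asyncSearch.submit` flow concrete, here is a sketch that submits a search and, if it is still running, retrieves the final result by ID; the index name and query are hypothetical:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Wait up to one second for the search to finish synchronously.
  const { body } = await client.asyncSearch.submit({
    index: 'my-index', // hypothetical index
    wait_for_completion_timeout: '1s',
    body: { query: { match_all: {} } }
  })
  if (body.is_running) {
    // Still running: fetch the final response with the returned ID.
    const { body: finished } = await client.asyncSearch.get({ id: body.id })
    console.log(finished.response)
  } else {
    console.log(body.response)
  }
}

run().catch(console.log)
----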
- -|`stats` -|`string \| string[]` - Specific 'tag' of the request for logging and statistical purposes - -|`suggest_field` or `suggestField` -|`string` - Specify which field to use for suggestions - -|`suggest_mode` or `suggestMode` -|`'missing' \| 'popular' \| 'always'` - Specify suggest mode + -_Default:_ `missing` - -|`suggest_size` or `suggestSize` -|`number` - How many suggestions to return in response - -|`suggest_text` or `suggestText` -|`string` - The source text for which the suggestions should be returned - -|`timeout` -|`string` - Explicit operation timeout - -|`track_scores` or `trackScores` -|`boolean` - Whether to calculate and return scores even if they are not used for sorting - -|`track_total_hits` or `trackTotalHits` -|`boolean\|long` - Indicate if the number of documents that match the query should be tracked. A number can also be specified, to accurately track the total hit count up to the number. - -|`allow_partial_search_results` or `allowPartialSearchResults` -|`boolean` - Indicate if an error should be returned if there is a partial search failure or timeout + -_Default:_ `true` - -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|`version` -|`boolean` - Specify whether to return document version as part of a hit - -|`seq_no_primary_term` or `seqNoPrimaryTerm` -|`boolean` - Specify whether to return sequence number and primary term of the last modification of each hit - -|`max_concurrent_shard_requests` or `maxConcurrentShardRequests` -|`number` - The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests + -_Default:_ `5` - -|`body` -|`object` - The search definition using the Query DSL - -|=== - -=== autoscaling.deleteAutoscalingPolicy -*Stability:* experimental -[source,ts] ----- -client.autoscaling.deleteAutoscalingPolicy({ - name: string -}) ----- -link:{ref}/autoscaling-delete-autoscaling-policy.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - the name of the autoscaling policy - -|=== - -=== autoscaling.getAutoscalingDecision -*Stability:* experimental -[source,ts] ----- -client.autoscaling.getAutoscalingDecision() ----- -link:{ref}/autoscaling-get-autoscaling-decision.html[Documentation] + - - -=== autoscaling.getAutoscalingPolicy -*Stability:* experimental -[source,ts] ----- -client.autoscaling.getAutoscalingPolicy({ - name: string -}) ----- -link:{ref}/autoscaling-get-autoscaling-policy.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - the name of the autoscaling policy - -|=== - -=== autoscaling.putAutoscalingPolicy -*Stability:* experimental -[source,ts] ----- -client.autoscaling.putAutoscalingPolicy({ - name: string, - body: object -}) ----- -link:{ref}/autoscaling-put-autoscaling-policy.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - the name of the autoscaling policy - -|`body` -|`object` - the specification of the autoscaling policy - -|=== - -=== cat.mlDataFrameAnalytics - -[source,ts] ----- -client.cat.mlDataFrameAnalytics({ - id: string, - allow_no_match: boolean, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-dfanalytics.html[Documentation] + 
-[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.mlDatafeeds - -[source,ts] ----- -client.cat.mlDatafeeds({ - datafeed_id: string, - allow_no_datafeeds: boolean, - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-datafeeds.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeeds stats to fetch - -|`allow_no_datafeeds` or `allowNoDatafeeds` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.mlJobs - -[source,ts] ----- -client.cat.mlJobs({ - job_id: string, - allow_no_jobs: boolean, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-anomaly-detectors.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the jobs stats to fetch - -|`allow_no_jobs` or `allowNoJobs` -|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. 
Display column headers - -|=== - -=== cat.mlTrainedModels - -[source,ts] ----- -client.cat.mlTrainedModels({ - model_id: string, - allow_no_match: boolean, - from: number, - size: number, - bytes: 'b' | 'k' | 'kb' | 'm' | 'mb' | 'g' | 'gb' | 't' | 'tb' | 'p' | 'pb', - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-trained-model.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The ID of the trained models stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no trained models. (This includes `_all` string or when no trained models have been specified) + -_Default:_ `true` - -|`from` -|`number` - skips a number of trained models - -|`size` -|`number` - specifies a max number of trained models to get + -_Default:_ `100` - -|`bytes` -|`'b' \| 'k' \| 'kb' \| 'm' \| 'mb' \| 'g' \| 'gb' \| 't' \| 'tb' \| 'p' \| 'pb'` - The unit in which to display byte values - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== cat.transforms - -[source,ts] ----- -client.cat.transforms({ - transform_id: string, - from: number, - size: number, - allow_no_match: boolean, - format: string, - h: string | string[], - help: boolean, - s: string | string[], - time: 'd' | 'h' | 'm' | 's' | 'ms' | 'micros' | 'nanos', - v: boolean -}) ----- -link:{ref}/cat-transforms.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform for which to get stats. '_all' or '*' implies all transforms - -|`from` -|`number` - skips a number of transform configs, defaults to 0 - -|`size` -|`number` - specifies a max number of transforms to get, defaults to 100 - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`h` -|`string \| string[]` - Comma-separated list of column names to display - -|`help` -|`boolean` - Return help information - -|`s` -|`string \| string[]` - Comma-separated list of column names or column aliases to sort by - -|`time` -|`'d' \| 'h' \| 'm' \| 's' \| 'ms' \| 'micros' \| 'nanos'` - The unit in which to display time values - -|`v` -|`boolean` - Verbose mode. Display column headers - -|=== - -=== ccr.deleteAutoFollowPattern - -[source,ts] ----- -client.ccr.deleteAutoFollowPattern({ - name: string -}) ----- -link:{ref}/ccr-delete-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern. 
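The cat APIs above return plain-text tables by default; this sketch of `cat.transforms` requests JSON with a column selection instead (the column names passed to `h` are an assumed example):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // format: 'json' swaps the text table for structured output.
  const { body } = await client.cat.transforms({
    format: 'json',
    h: 'id,state,documents_processed' // assumed column names
  })
  console.log(body)
}

run().catch(console.log)
----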
- -|=== - -=== ccr.follow - -[source,ts] ----- -client.ccr.follow({ - index: string, - wait_for_active_shards: string, - body: object -}) ----- -link:{ref}/ccr-put-follow.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the follower index - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of shard copies that must be active before returning. Defaults to 0. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1) + -_Default:_ `0` - -|`body` -|`object` - The name of the leader index and other optional ccr related parameters - -|=== - -=== ccr.followInfo - -[source,ts] ----- -client.ccr.followInfo({ - index: string | string[] -}) ----- -link:{ref}/ccr-get-follow-info.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index patterns; use `_all` to perform the operation on all indices - -|=== - -=== ccr.followStats - -[source,ts] ----- -client.ccr.followStats({ - index: string | string[] -}) ----- -link:{ref}/ccr-get-follow-stats.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index patterns; use `_all` to perform the operation on all indices - -|=== - -=== ccr.forgetFollower - -[source,ts] ----- -client.ccr.forgetFollower({ - index: string, - body: object -}) ----- -link:{ref}/ccr-post-forget-follower.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - the name of the leader index for which specified follower retention leases should be removed - -|`body` -|`object` - the name and UUID of the follower index, the name of the cluster containing the follower index, and the alias from the perspective of that cluster for the remote cluster containing the leader index - -|=== - -=== ccr.getAutoFollowPattern - -[source,ts] ----- -client.ccr.getAutoFollowPattern({ - name: string -}) ----- -link:{ref}/ccr-get-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern. - -|=== - -=== ccr.pauseAutoFollowPattern - -[source,ts] ----- -client.ccr.pauseAutoFollowPattern({ - name: string -}) ----- -link:{ref}/ccr-pause-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern that should pause discovering new indices to follow. - -|=== - -=== ccr.pauseFollow - -[source,ts] ----- -client.ccr.pauseFollow({ - index: string -}) ----- -link:{ref}/ccr-post-pause-follow.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the follower index that should pause following its leader index. - -|=== - -=== ccr.putAutoFollowPattern - -[source,ts] ----- -client.ccr.putAutoFollowPattern({ - name: string, - body: object -}) ----- -link:{ref}/ccr-put-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern. - -|`body` -|`object` - The specification of the auto follow pattern - -|=== - -=== ccr.resumeAutoFollowPattern - -[source,ts] ----- -client.ccr.resumeAutoFollowPattern({ - name: string -}) ----- -link:{ref}/ccr-resume-auto-follow-pattern.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the auto follow pattern to resume discovering new indices to follow. 
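A sketch tying the ccr calls above together: create a follower index, then pause it; the cluster and index names are hypothetical and assume a remote cluster is already configured:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Start replicating a leader index from a configured remote cluster.
  await client.ccr.follow({
    index: 'follower-index', // hypothetical follower index
    body: {
      remote_cluster: 'remote-cluster', // hypothetical remote cluster alias
      leader_index: 'leader-index'      // hypothetical leader index
    }
  })
  // Replication can be paused (and later resumed) per follower index.
  await client.ccr.pauseFollow({ index: 'follower-index' })
}

run().catch(console.log)
----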
- -|=== - -=== ccr.resumeFollow - -[source,ts] ----- -client.ccr.resumeFollow({ - index: string, - body: object -}) ----- -link:{ref}/ccr-post-resume-follow.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the follow index to resume following. - -|`body` -|`object` - The name of the leader index and other optional ccr related parameters - -|=== - -=== ccr.stats - -[source,ts] ----- -client.ccr.stats() ----- -link:{ref}/ccr-get-stats.html[Documentation] + - - -=== ccr.unfollow - -[source,ts] ----- -client.ccr.unfollow({ - index: string -}) ----- -link:{ref}/ccr-post-unfollow.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the follower index that should be turned into a regular index. - -|=== - -=== dataFrameTransformDeprecated.deleteTransform -*Stability:* beta -[source,ts] ----- -client.dataFrameTransformDeprecated.deleteTransform({ - transform_id: string, - force: boolean -}) ----- -link:{ref}/delete-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform to delete - -|`force` -|`boolean` - When `true`, the transform is deleted regardless of its current state. The default value is `false`, meaning that the transform must be `stopped` before it can be deleted. - -|=== - -=== dataFrameTransformDeprecated.getTransform -*Stability:* beta -[source,ts] ----- -client.dataFrameTransformDeprecated.getTransform({ - transform_id: string, - from: number, - size: number, - allow_no_match: boolean -}) ----- -link:{ref}/get-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id or comma delimited list of id expressions of the transforms to get, '_all' or '*' implies get all transforms - -|`from` -|`number` - skips a number of transform configs, defaults to 0 - -|`size` -|`number` - specifies a max number of transforms to get, defaults to 100 - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|=== - -=== dataFrameTransformDeprecated.getTransformStats -*Stability:* beta -[source,ts] ----- -client.dataFrameTransformDeprecated.getTransformStats({ - transform_id: string, - from: number, - size: number, - allow_no_match: boolean -}) ----- -link:{ref}/get-transform-stats.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform for which to get stats. '_all' or '*' implies all transforms - -|`from` -|`number` - skips a number of transform stats, defaults to 0 - -|`size` -|`number` - specifies a max number of transform stats to get, defaults to 100 - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. 
(This includes `_all` string or when no transforms have been specified) - -|=== - -=== dataFrameTransformDeprecated.previewTransform -*Stability:* beta -[source,ts] ---- -client.dataFrameTransformDeprecated.previewTransform({ - body: object -}) ---- -link:{ref}/preview-transform.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The definition for the transform to preview - -|=== - -=== dataFrameTransformDeprecated.putTransform -*Stability:* beta -[source,ts] ---- -client.dataFrameTransformDeprecated.putTransform({ - transform_id: string, - defer_validation: boolean, - body: object -}) ---- -link:{ref}/put-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the new transform. - -|`defer_validation` or `deferValidation` -|`boolean` - If validations should be deferred until transform starts, defaults to false. - -|`body` -|`object` - The transform definition - -|=== - -=== dataFrameTransformDeprecated.startTransform -*Stability:* beta -[source,ts] ---- -client.dataFrameTransformDeprecated.startTransform({ - transform_id: string, - timeout: string -}) ---- -link:{ref}/start-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform to start - -|`timeout` -|`string` - Controls the time to wait for the transform to start - -|=== - -=== dataFrameTransformDeprecated.stopTransform -*Stability:* beta -[source,ts] ---- -client.dataFrameTransformDeprecated.stopTransform({ - transform_id: string, - wait_for_completion: boolean, - timeout: string, - allow_no_match: boolean -}) ---- -link:{ref}/stop-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform to stop - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Whether to wait for the transform to fully stop before returning or not. Defaults to false - -|`timeout` -|`string` - Controls the time to wait until the transform has stopped. Defaults to 30 seconds - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|=== - -=== dataFrameTransformDeprecated.updateTransform -*Stability:* beta -[source,ts] ---- -client.dataFrameTransformDeprecated.updateTransform({ - transform_id: string, - defer_validation: boolean, - body: object -}) ---- -link:{ref}/update-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform. - -|`defer_validation` or `deferValidation` -|`boolean` - If validations should be deferred until transform starts, defaults to false. - -|`body` -|`object` - The update transform definition - -|=== - -=== enrich.deletePolicy - -[source,ts] ---- -client.enrich.deletePolicy({ - name: string -}) ---- -link:{ref}/delete-enrich-policy-api.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the enrich policy - -|=== - -=== enrich.executePolicy - -[source,ts] ---- -client.enrich.executePolicy({ - name: string, - wait_for_completion: boolean -}) ---- -link:{ref}/execute-enrich-policy-api.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the enrich policy - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Whether the request should block until the execution is complete.
+ -_Default:_ `true` - -|=== - -=== enrich.getPolicy - -[source,ts] ----- -client.enrich.getPolicy({ - name: string | string[] -}) ----- -link:{ref}/get-enrich-policy-api.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of enrich policy names - -|=== - -=== enrich.putPolicy - -[source,ts] ----- -client.enrich.putPolicy({ - name: string, - body: object -}) ----- -link:{ref}/put-enrich-policy-api.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - The name of the enrich policy - -|`body` -|`object` - The enrich policy to register - -|=== - -=== enrich.stats - -[source,ts] ----- -client.enrich.stats() ----- -link:{ref}/enrich-stats-api.html[Documentation] + - - -=== eql.search -*Stability:* beta -[source,ts] ----- -client.eql.search({ - index: string, - body: object -}) ----- -link:{ref}/eql-search-api.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to scope the operation - -|`body` -|`object` - Eql request body. Use the `query` to limit the query scope. - -|=== - -=== graph.explore - -[source,ts] ----- -client.graph.explore({ - index: string | string[], - routing: string, - timeout: string, - body: object -}) ----- -link:{ref}/graph-explore-api.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices - -|`routing` -|`string` - Specific routing value - -|`timeout` -|`string` - Explicit operation timeout - -|`body` -|`object` - Graph Query DSL - -|=== - -=== ilm.deleteLifecycle - -[source,ts] ----- -client.ilm.deleteLifecycle({ - policy: string -}) ----- -link:{ref}/ilm-delete-lifecycle.html[Documentation] + -[cols=2*] -|=== -|`policy` -|`string` - The name of the index lifecycle policy - -|=== - -=== ilm.explainLifecycle - -[source,ts] ----- -client.ilm.explainLifecycle({ - index: string, - only_managed: boolean, - only_errors: boolean -}) ----- -link:{ref}/ilm-explain-lifecycle.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to explain - -|`only_managed` or `onlyManaged` -|`boolean` - filters the indices included in the response to ones managed by ILM - -|`only_errors` or `onlyErrors` -|`boolean` - filters the indices included in the response to ones in an ILM error state, implies only_managed - -|=== - -=== ilm.getLifecycle - -[source,ts] ----- -client.ilm.getLifecycle({ - policy: string -}) ----- -link:{ref}/ilm-get-lifecycle.html[Documentation] + -[cols=2*] -|=== -|`policy` -|`string` - The name of the index lifecycle policy - -|=== - -=== ilm.getStatus - -[source,ts] ----- -client.ilm.getStatus() ----- -link:{ref}/ilm-get-status.html[Documentation] + - - -=== ilm.moveToStep - -[source,ts] ----- -client.ilm.moveToStep({ - index: string, - body: object -}) ----- -link:{ref}/ilm-move-to-step.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index whose lifecycle step is to change - -|`body` -|`object` - The new lifecycle step to move to - -|=== - -=== ilm.putLifecycle - -[source,ts] ----- -client.ilm.putLifecycle({ - policy: string, - body: object -}) ----- -link:{ref}/ilm-put-lifecycle.html[Documentation] + -[cols=2*] -|=== -|`policy` -|`string` - The name of the index lifecycle policy - -|`body` -|`object` - The lifecycle policy definition to register - -|=== - -=== ilm.removePolicy - -[source,ts] ----- -client.ilm.removePolicy({ - index: string -}) ----- -link:{ref}/ilm-remove-policy.html[Documentation] 
+ -[cols=2*] -|=== -|`index` -|`string` - The name of the index to remove policy on - -|=== - -=== ilm.retry - -[source,ts] ---- -client.ilm.retry({ - index: string -}) ---- -link:{ref}/ilm-retry-policy.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the indices (comma-separated) whose failed lifecycle step is to be retried - -|=== - -=== ilm.start - -[source,ts] ---- -client.ilm.start() ---- -link:{ref}/ilm-start.html[Documentation] + - - -=== ilm.stop - -[source,ts] ---- -client.ilm.stop() ---- -link:{ref}/ilm-stop.html[Documentation] + - - -=== indices.freeze - -[source,ts] ---- -client.indices.freeze({ - index: string, - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - wait_for_active_shards: string -}) ---- -link:{ref}/freeze-index-api.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to freeze - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `closed` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of active shards to wait for before the operation returns. - -|=== - -=== indices.reloadSearchAnalyzers - -[source,ts] ---- -client.indices.reloadSearchAnalyzers({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all' -}) ---- -link:{ref}/indices-reload-analyzers.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names to reload analyzers for - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both.
+ -_Default:_ `open` - -|=== - -=== indices.unfreeze - -[source,ts] ----- -client.indices.unfreeze({ - index: string, - timeout: string, - master_timeout: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - wait_for_active_shards: string -}) ----- -link:{ref}/unfreeze-index-api.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The name of the index to unfreeze - -|`timeout` -|`string` - Explicit operation timeout - -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for connection to master - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `closed` - -|`wait_for_active_shards` or `waitForActiveShards` -|`string` - Sets the number of active shards to wait for before the operation returns. - -|=== - -=== license.delete - -[source,ts] ----- -client.license.delete() ----- -link:{ref}/delete-license.html[Documentation] + - - -=== license.get - -[source,ts] ----- -client.license.get({ - local: boolean, - accept_enterprise: boolean -}) ----- -link:{ref}/get-license.html[Documentation] + -[cols=2*] -|=== -|`local` -|`boolean` - Return local information, do not retrieve the state from master node (default: false) - -|`accept_enterprise` or `acceptEnterprise` -|`boolean` - Supported for backwards compatibility with 7.x. If this param is used it must be set to true + - -WARNING: This parameter has been deprecated. 
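As a small sketch of the license API just described, reading the current license level (no hypothetical names needed beyond the node URL):

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const { body } = await client.license.get()
  // The response body carries the license type and its current status.
  console.log(body.license.type, body.license.status)
}

run().catch(console.log)
----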
- -|=== - -=== license.getBasicStatus - -[source,ts] ----- -client.license.getBasicStatus() ----- -link:{ref}/get-basic-status.html[Documentation] + - - -=== license.getTrialStatus - -[source,ts] ----- -client.license.getTrialStatus() ----- -link:{ref}/get-trial-status.html[Documentation] + - - -=== license.post - -[source,ts] ----- -client.license.post({ - acknowledge: boolean, - body: object -}) ----- -link:{ref}/update-license.html[Documentation] + -[cols=2*] -|=== -|`acknowledge` -|`boolean` - whether the user has acknowledged acknowledge messages (default: false) - -|`body` -|`object` - licenses to be installed - -|=== - -=== license.postStartBasic - -[source,ts] ----- -client.license.postStartBasic({ - acknowledge: boolean -}) ----- -link:{ref}/start-basic.html[Documentation] + -[cols=2*] -|=== -|`acknowledge` -|`boolean` - whether the user has acknowledged acknowledge messages (default: false) - -|=== - -=== license.postStartTrial - -[source,ts] ----- -client.license.postStartTrial({ - type: string, - acknowledge: boolean -}) ----- -link:{ref}/start-trial.html[Documentation] + -[cols=2*] -|=== -|`type` -|`string` - The type of trial license to generate (default: "trial") - -|`acknowledge` -|`boolean` - whether the user has acknowledged acknowledge messages (default: false) - -|=== - -=== migration.deprecations - -[source,ts] ----- -client.migration.deprecations({ - index: string -}) ----- -link:{ref}/migration-api-deprecation.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - Index pattern - -|=== - -=== ml.closeJob - -[source,ts] ----- -client.ml.closeJob({ - job_id: string, - allow_no_jobs: boolean, - force: boolean, - timeout: string, - body: object -}) ----- -link:{ref}/ml-close-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The name of the job to close - -|`allow_no_jobs` or `allowNoJobs` -|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) - -|`force` -|`boolean` - True if the job should be forcefully closed - -|`timeout` -|`string` - Controls the time to wait until a job has closed. 
Default to 30 minutes - -|`body` -|`object` - The URL params optionally sent in the body - -|=== - -=== ml.deleteCalendar - -[source,ts] ----- -client.ml.deleteCalendar({ - calendar_id: string -}) ----- -link:{ref}/ml-delete-calendar.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar to delete - -|=== - -=== ml.deleteCalendarEvent - -[source,ts] ----- -client.ml.deleteCalendarEvent({ - calendar_id: string, - event_id: string -}) ----- -link:{ref}/ml-delete-calendar-event.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar to modify - -|`event_id` or `eventId` -|`string` - The ID of the event to remove from the calendar - -|=== - -=== ml.deleteCalendarJob - -[source,ts] ----- -client.ml.deleteCalendarJob({ - calendar_id: string, - job_id: string -}) ----- -link:{ref}/ml-delete-calendar-job.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar to modify - -|`job_id` or `jobId` -|`string` - The ID of the job to remove from the calendar - -|=== - -=== ml.deleteDataFrameAnalytics -*Stability:* experimental -[source,ts] ----- -client.ml.deleteDataFrameAnalytics({ - id: string, - force: boolean -}) ----- -link:{ref}/delete-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to delete - -|`force` -|`boolean` - True if the job should be forcefully deleted - -|=== - -=== ml.deleteDatafeed - -[source,ts] ----- -client.ml.deleteDatafeed({ - datafeed_id: string, - force: boolean -}) ----- -link:{ref}/ml-delete-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeed to delete - -|`force` -|`boolean` - True if the datafeed should be forcefully deleted - -|=== - -=== ml.deleteExpiredData - -[source,ts] ----- -client.ml.deleteExpiredData({ - body: object -}) ----- -link:{ref}/ml-delete-expired-data.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - deleting expired data parameters - -|=== - -=== ml.deleteFilter - -[source,ts] ----- -client.ml.deleteFilter({ - filter_id: string -}) ----- -link:{ref}/ml-delete-filter.html[Documentation] + -[cols=2*] -|=== -|`filter_id` or `filterId` -|`string` - The ID of the filter to delete - -|=== - -=== ml.deleteForecast - -[source,ts] ----- -client.ml.deleteForecast({ - job_id: string, - forecast_id: string, - allow_no_forecasts: boolean, - timeout: string -}) ----- -link:{ref}/ml-delete-forecast.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job from which to delete forecasts - -|`forecast_id` or `forecastId` -|`string` - The ID of the forecast to delete, can be comma delimited list. Leaving blank implies `_all` - -|`allow_no_forecasts` or `allowNoForecasts` -|`boolean` - Whether to ignore if `_all` matches no forecasts - -|`timeout` -|`string` - Controls the time to wait until the forecast(s) are deleted. 
Default to 30 seconds - -|=== - -=== ml.deleteJob - -[source,ts] ----- -client.ml.deleteJob({ - job_id: string, - force: boolean, - wait_for_completion: boolean -}) ----- -link:{ref}/ml-delete-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to delete - -|`force` -|`boolean` - True if the job should be forcefully deleted - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should this request wait until the operation has completed before returning + -_Default:_ `true` - -|=== - -=== ml.deleteModelSnapshot - -[source,ts] ----- -client.ml.deleteModelSnapshot({ - job_id: string, - snapshot_id: string -}) ----- -link:{ref}/ml-delete-snapshot.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to fetch - -|`snapshot_id` or `snapshotId` -|`string` - The ID of the snapshot to delete - -|=== - -=== ml.deleteTrainedModel -*Stability:* experimental -[source,ts] ----- -client.ml.deleteTrainedModel({ - model_id: string -}) ----- -link:{ref}/delete-inference.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The ID of the trained model to delete - -|=== - -=== ml.estimateModelMemory - -[source,ts] ----- -client.ml.estimateModelMemory({ - body: object -}) ----- -link:{ref}/ml-apis.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The analysis config, plus cardinality estimates for fields it references - -|=== - -=== ml.evaluateDataFrame -*Stability:* experimental -[source,ts] ----- -client.ml.evaluateDataFrame({ - body: object -}) ----- -link:{ref}/evaluate-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The evaluation definition - -|=== - -=== ml.explainDataFrameAnalytics -*Stability:* experimental -[source,ts] ----- -client.ml.explainDataFrameAnalytics({ - id: string, - body: object -}) ----- -link:{ref}/explain-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to explain - -|`body` -|`object` - The data frame analytics config to explain - -|=== - -=== ml.findFileStructure -*Stability:* experimental -[source,ts] ----- -client.ml.findFileStructure({ - lines_to_sample: number, - line_merge_size_limit: number, - timeout: string, - charset: string, - format: 'ndjson' | 'xml' | 'delimited' | 'semi_structured_text', - has_header_row: boolean, - column_names: string | string[], - delimiter: string, - quote: string, - should_trim_fields: boolean, - grok_pattern: string, - timestamp_field: string, - timestamp_format: string, - explain: boolean, - body: object -}) ----- -link:{ref}/ml-find-file-structure.html[Documentation] + -[cols=2*] -|=== -|`lines_to_sample` or `linesToSample` -|`number` - How many lines of the file should be included in the analysis + -_Default:_ `1000` - -|`line_merge_size_limit` or `lineMergeSizeLimit` -|`number` - Maximum number of characters permitted in a single message when lines are merged to create messages. 
+ -_Default:_ `10000` - -|`timeout` -|`string` - Timeout after which the analysis will be aborted + -_Default:_ `25s` - -|`charset` -|`string` - Optional parameter to specify the character set of the file - -|`format` -|`'ndjson' \| 'xml' \| 'delimited' \| 'semi_structured_text'` - Optional parameter to specify the high level file format - -|`has_header_row` or `hasHeaderRow` -|`boolean` - Optional parameter to specify whether a delimited file includes the column names in its first row - -|`column_names` or `columnNames` -|`string \| string[]` - Optional parameter containing a comma separated list of the column names for a delimited file - -|`delimiter` -|`string` - Optional parameter to specify the delimiter character for a delimited file - must be a single character - -|`quote` -|`string` - Optional parameter to specify the quote character for a delimited file - must be a single character - -|`should_trim_fields` or `shouldTrimFields` -|`boolean` - Optional parameter to specify whether the values between delimiters in a delimited file should have whitespace trimmed from them - -|`grok_pattern` or `grokPattern` -|`string` - Optional parameter to specify the Grok pattern that should be used to extract fields from messages in a semi-structured text file - -|`timestamp_field` or `timestampField` -|`string` - Optional parameter to specify the timestamp field in the file - -|`timestamp_format` or `timestampFormat` -|`string` - Optional parameter to specify the timestamp format in the file - may be either a Joda or Java time format - -|`explain` -|`boolean` - Whether to include a commentary on how the structure was derived - -|`body` -|`object` - The contents of the file to be analyzed - -|=== - -=== ml.flushJob - -[source,ts] ----- -client.ml.flushJob({ - job_id: string, - calc_interim: boolean, - start: string, - end: string, - advance_time: string, - skip_time: string, - body: object -}) ----- -link:{ref}/ml-flush-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The name of the job to flush - -|`calc_interim` or `calcInterim` -|`boolean` - Calculates interim results for the most recent bucket or all buckets within the latency period - -|`start` -|`string` - When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results - -|`end` -|`string` - When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results - -|`advance_time` or `advanceTime` -|`string` - Advances time to the given value generating results and updating the model for the advanced interval - -|`skip_time` or `skipTime` -|`string` - Skips time to the given value without generating results or updating the model for the skipped interval - -|`body` -|`object` - Flush parameters - -|=== - -=== ml.forecast - -[source,ts] ----- -client.ml.forecast({ - job_id: string, - duration: string, - expires_in: string, - max_model_memory: string -}) ----- -link:{ref}/ml-forecast.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to forecast for - -|`duration` -|`string` - The duration of the forecast - -|`expires_in` or `expiresIn` -|`string` - The time interval after which the forecast expires. Expired forecasts will be deleted at the first opportunity. - -|`max_model_memory` or `maxModelMemory` -|`string` - The max memory able to be used by the forecast. Default is 20mb. 
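A sketch of `ml.forecast` as documented above; the job ID is hypothetical, and the job must already exist and be open:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Request a three-day forecast that is kept for a week.
  const { body } = await client.ml.forecast({
    job_id: 'my-anomaly-job', // hypothetical job ID
    duration: '3d',
    expires_in: '7d'
  })
  console.log(body.forecast_id)
}

run().catch(console.log)
----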
- -|=== - -=== ml.getBuckets - -[source,ts] ----- -client.ml.getBuckets({ - job_id: string, - timestamp: string, - expand: boolean, - exclude_interim: boolean, - from: number, - size: number, - start: string, - end: string, - anomaly_score: number, - sort: string, - desc: boolean, - body: object -}) ----- -link:{ref}/ml-get-bucket.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - ID of the job to get bucket results from - -|`timestamp` -|`string` - The timestamp of the desired single bucket result - -|`expand` -|`boolean` - Include anomaly records - -|`exclude_interim` or `excludeInterim` -|`boolean` - Exclude interim results - -|`from` -|`number` - skips a number of buckets - -|`size` -|`number` - specifies a max number of buckets to get - -|`start` -|`string` - Start time filter for buckets - -|`end` -|`string` - End time filter for buckets - -|`anomaly_score` or `anomalyScore` -|`number` - Filter for the most anomalous buckets - -|`sort` -|`string` - Sort buckets by a particular field - -|`desc` -|`boolean` - Set the sort direction - -|`body` -|`object` - Bucket selection details if not provided in URI - -|=== - -=== ml.getCalendarEvents - -[source,ts] ----- -client.ml.getCalendarEvents({ - calendar_id: string, - job_id: string, - start: string, - end: string, - from: number, - size: number -}) ----- -link:{ref}/ml-get-calendar-event.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar containing the events - -|`job_id` or `jobId` -|`string` - Get events for the job. When this option is used calendar_id must be '_all' - -|`start` -|`string` - Get events after this time - -|`end` -|`string` - Get events before this time - -|`from` -|`number` - Skips a number of events - -|`size` -|`number` - Specifies a max number of events to get - -|=== - -=== ml.getCalendars - -[source,ts] ----- -client.ml.getCalendars({ - calendar_id: string, - from: number, - size: number, - body: object -}) ----- -link:{ref}/ml-get-calendar.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar to fetch - -|`from` -|`number` - skips a number of calendars - -|`size` -|`number` - specifies a max number of calendars to get - -|`body` -|`object` - The from and size parameters optionally sent in the body - -|=== - -=== ml.getCategories - -[source,ts] ----- -client.ml.getCategories({ - job_id: string, - category_id: number, - from: number, - size: number, - body: object -}) ----- -link:{ref}/ml-get-category.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The name of the job - -|`category_id` or `categoryId` -|`number` - The identifier of the category definition of interest - -|`from` -|`number` - skips a number of categories - -|`size` -|`number` - specifies a max number of categories to get - -|`body` -|`object` - Category selection details if not provided in URI - -|=== - -=== ml.getDataFrameAnalytics -*Stability:* experimental -[source,ts] ----- -client.ml.getDataFrameAnalytics({ - id: string, - allow_no_match: boolean, - from: number, - size: number -}) ----- -link:{ref}/get-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no data frame analytics. 
(This includes `_all` string or when no data frame analytics have been specified) + -_Default:_ `true` - -|`from` -|`number` - skips a number of analytics - -|`size` -|`number` - specifies a max number of analytics to get + -_Default:_ `100` - -|=== - -=== ml.getDataFrameAnalyticsStats -*Stability:* experimental -[source,ts] ----- -client.ml.getDataFrameAnalyticsStats({ - id: string, - allow_no_match: boolean, - from: number, - size: number -}) ----- -link:{ref}/get-dfanalytics-stats.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no data frame analytics. (This includes `_all` string or when no data frame analytics have been specified) + -_Default:_ `true` - -|`from` -|`number` - skips a number of analytics - -|`size` -|`number` - specifies a max number of analytics to get + -_Default:_ `100` - -|=== - -=== ml.getDatafeedStats - -[source,ts] ----- -client.ml.getDatafeedStats({ - datafeed_id: string, - allow_no_datafeeds: boolean -}) ----- -link:{ref}/ml-get-datafeed-stats.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeeds stats to fetch - -|`allow_no_datafeeds` or `allowNoDatafeeds` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) - -|=== - -=== ml.getDatafeeds - -[source,ts] ----- -client.ml.getDatafeeds({ - datafeed_id: string, - allow_no_datafeeds: boolean -}) ----- -link:{ref}/ml-get-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeeds to fetch - -|`allow_no_datafeeds` or `allowNoDatafeeds` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. 
(This includes `_all` string or when no datafeeds have been specified) - -|=== - -=== ml.getFilters - -[source,ts] ---- -client.ml.getFilters({ - filter_id: string, - from: number, - size: number -}) ---- -link:{ref}/ml-get-filter.html[Documentation] + -[cols=2*] -|=== -|`filter_id` or `filterId` -|`string` - The ID of the filter to fetch - -|`from` -|`number` - skips a number of filters - -|`size` -|`number` - specifies a max number of filters to get - -|=== - -=== ml.getInfluencers - -[source,ts] ---- -client.ml.getInfluencers({ - job_id: string, - exclude_interim: boolean, - from: number, - size: number, - start: string, - end: string, - influencer_score: number, - sort: string, - desc: boolean, - body: object -}) ---- -link:{ref}/ml-get-influencer.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - Identifier for the anomaly detection job - -|`exclude_interim` or `excludeInterim` -|`boolean` - Exclude interim results - -|`from` -|`number` - skips a number of influencers - -|`size` -|`number` - specifies a max number of influencers to get - -|`start` -|`string` - start timestamp for the requested influencers - -|`end` -|`string` - end timestamp for the requested influencers - -|`influencer_score` or `influencerScore` -|`number` - influencer score threshold for the requested influencers - -|`sort` -|`string` - sort field for the requested influencers - -|`desc` -|`boolean` - whether the results should be sorted in descending order - -|`body` -|`object` - Influencer selection criteria - -|=== - -=== ml.getJobStats - -[source,ts] ---- -client.ml.getJobStats({ - job_id: string, - allow_no_jobs: boolean -}) ---- -link:{ref}/ml-get-job-stats.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the jobs stats to fetch - -|`allow_no_jobs` or `allowNoJobs` -|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) - -|=== - -=== ml.getJobs - -[source,ts] ---- -client.ml.getJobs({ - job_id: string, - allow_no_jobs: boolean -}) ---- -link:{ref}/ml-get-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the jobs to fetch - -|`allow_no_jobs` or `allowNoJobs` -|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) - -|=== - -=== ml.getModelSnapshots - -[source,ts] ---- -client.ml.getModelSnapshots({ - job_id: string, - snapshot_id: string, - from: number, - size: number, - start: string, - end: string, - sort: string, - desc: boolean, - body: object -}) ---- -link:{ref}/ml-get-snapshot.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to fetch - -|`snapshot_id` or `snapshotId` -|`string` - The ID of the snapshot to fetch - -|`from` -|`number` - Skips a number of documents - -|`size` -|`number` - The default number of documents returned in queries as a string.
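For the job-level getters above, a sketch that fetches one job's configuration and its runtime stats side by side; the job ID is hypothetical:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  const jobId = 'my-anomaly-job' // hypothetical job ID
  const { body: config } = await client.ml.getJobs({ job_id: jobId })
  const { body: stats } = await client.ml.getJobStats({ job_id: jobId })
  console.log(config.jobs[0].analysis_config, stats.jobs[0].state)
}

run().catch(console.log)
----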
- -|`start` -|`string` - The filter 'start' query parameter - -|`end` -|`string` - The filter 'end' query parameter - -|`sort` -|`string` - Name of the field to sort on - -|`desc` -|`boolean` - True if the results should be sorted in descending order - -|`body` -|`object` - Model snapshot selection criteria - -|=== - -=== ml.getOverallBuckets - -[source,ts] ----- -client.ml.getOverallBuckets({ - job_id: string, - top_n: number, - bucket_span: string, - overall_score: number, - exclude_interim: boolean, - start: string, - end: string, - allow_no_jobs: boolean, - body: object -}) ----- -link:{ref}/ml-get-overall-buckets.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The job IDs for which to calculate overall bucket results - -|`top_n` or `topN` -|`number` - The number of top job bucket scores to be used in the overall_score calculation - -|`bucket_span` or `bucketSpan` -|`string` - The span of the overall buckets. Defaults to the longest job bucket_span - -|`overall_score` or `overallScore` -|`number` - Returns overall buckets with overall scores higher than this value - -|`exclude_interim` or `excludeInterim` -|`boolean` - If true overall buckets that include interim buckets will be excluded - -|`start` -|`string` - Returns overall buckets with timestamps after this time - -|`end` -|`string` - Returns overall buckets with timestamps earlier than this time - -|`allow_no_jobs` or `allowNoJobs` -|`boolean` - Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified) - -|`body` -|`object` - Overall bucket selection details if not provided in URI - -|=== - -=== ml.getRecords - -[source,ts] ----- -client.ml.getRecords({ - job_id: string, - exclude_interim: boolean, - from: number, - size: number, - start: string, - end: string, - record_score: number, - sort: string, - desc: boolean, - body: object -}) ----- -link:{ref}/ml-get-record.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job - -|`exclude_interim` or `excludeInterim` -|`boolean` - Exclude interim results - -|`from` -|`number` - skips a number of records - -|`size` -|`number` - specifies a max number of records to get - -|`start` -|`string` - Start time filter for records - -|`end` -|`string` - End time filter for records - -|`record_score` or `recordScore` -|`number` - Returns records with anomaly scores greater or equal than this value - -|`sort` -|`string` - Sort records by a particular field - -|`desc` -|`boolean` - Set the sort direction - -|`body` -|`object` - Record selection criteria - -|=== - -=== ml.getTrainedModels -*Stability:* experimental -[source,ts] ----- -client.ml.getTrainedModels({ - model_id: string, - allow_no_match: boolean, - include_model_definition: boolean, - decompress_definition: boolean, - from: number, - size: number, - tags: string | string[], - for_export: boolean -}) ----- -link:{ref}/get-inference.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The ID of the trained models to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no trained models. (This includes `_all` string or when no trained models have been specified) + -_Default:_ `true` - -|`include_model_definition` or `includeModelDefinition` -|`boolean` - Should the full model definition be included in the results. These definitions can be large. So be cautious when including them. Defaults to false. 
- -|`decompress_definition` or `decompressDefinition` -|`boolean` - Should the model definition be decompressed into valid JSON or returned in a custom compressed format. Defaults to true. + -_Default:_ `true` - -|`from` -|`number` - skips a number of trained models - -|`size` -|`number` - specifies a max number of trained models to get + -_Default:_ `100` - -|`tags` -|`string \| string[]` - A comma-separated list of tags that the model must have. - -|`for_export` or `forExport` -|`boolean` - Omits fields that are illegal to set on model PUT - -|=== - -=== ml.getTrainedModelsStats -*Stability:* experimental -[source,ts] ----- -client.ml.getTrainedModelsStats({ - model_id: string, - allow_no_match: boolean, - from: number, - size: number -}) ----- -link:{ref}/get-inference-stats.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The ID of the trained models stats to fetch - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no trained models. (This includes `_all` string or when no trained models have been specified) + -_Default:_ `true` - -|`from` -|`number` - skips a number of trained models - -|`size` -|`number` - specifies a max number of trained models to get + -_Default:_ `100` - -|=== - -=== ml.info - -[source,ts] ----- -client.ml.info() ----- -link:{ref}/get-ml-info.html[Documentation] + - - -=== ml.openJob - -[source,ts] ----- -client.ml.openJob({ - job_id: string -}) ----- -link:{ref}/ml-open-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to open - -|=== - -=== ml.postCalendarEvents - -[source,ts] ----- -client.ml.postCalendarEvents({ - calendar_id: string, - body: object -}) ----- -link:{ref}/ml-post-calendar-event.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar to modify - -|`body` -|`object` - A list of events - -|=== - -=== ml.postData - -[source,ts] ----- -client.ml.postData({ - job_id: string, - reset_start: string, - reset_end: string, - body: object -}) ----- -link:{ref}/ml-post-data.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The name of the job receiving the data - -|`reset_start` or `resetStart` -|`string` - Optional parameter to specify the start of the bucket resetting range - -|`reset_end` or `resetEnd` -|`string` - Optional parameter to specify the end of the bucket resetting range - -|`body` -|`object` - The data to process - -|=== - -=== ml.previewDatafeed - -[source,ts] ----- -client.ml.previewDatafeed({ - datafeed_id: string -}) ----- -link:{ref}/ml-preview-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeed to preview - -|=== - -=== ml.putCalendar - -[source,ts] ----- -client.ml.putCalendar({ - calendar_id: string, - body: object -}) ----- -link:{ref}/ml-put-calendar.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar to create - -|`body` -|`object` - The calendar details - -|=== - -=== ml.putCalendarJob - -[source,ts] ----- -client.ml.putCalendarJob({ - calendar_id: string, - job_id: string -}) ----- -link:{ref}/ml-put-calendar-job.html[Documentation] + -[cols=2*] -|=== -|`calendar_id` or `calendarId` -|`string` - The ID of the calendar to modify - -|`job_id` or `jobId` -|`string` - The ID of the job to add to the calendar - -|=== - -=== ml.putDataFrameAnalytics -*Stability:* experimental -[source,ts] ----- 
-client.ml.putDataFrameAnalytics({ - id: string, - body: object -}) ----- -link:{ref}/put-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to create - -|`body` -|`object` - The data frame analytics configuration - -|=== - -=== ml.putDatafeed - -[source,ts] ----- -client.ml.putDatafeed({ - datafeed_id: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - ignore_throttled: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - body: object -}) ----- -link:{ref}/ml-put-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeed to create - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if the source indices expressions resolves to no concrete indices (default: true) - -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Ignore indices that are marked as throttled (default: true) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether source index expressions should get expanded to open or closed indices (default: open) - -|`body` -|`object` - The datafeed config - -|=== - -=== ml.putFilter - -[source,ts] ----- -client.ml.putFilter({ - filter_id: string, - body: object -}) ----- -link:{ref}/ml-put-filter.html[Documentation] + -[cols=2*] -|=== -|`filter_id` or `filterId` -|`string` - The ID of the filter to create - -|`body` -|`object` - The filter details - -|=== - -=== ml.putJob - -[source,ts] ----- -client.ml.putJob({ - job_id: string, - body: object -}) ----- -link:{ref}/ml-put-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to create - -|`body` -|`object` - The job - -|=== - -=== ml.putTrainedModel -*Stability:* experimental -[source,ts] ----- -client.ml.putTrainedModel({ - model_id: string, - body: object -}) ----- -link:{ref}/put-inference.html[Documentation] + -[cols=2*] -|=== -|`model_id` or `modelId` -|`string` - The ID of the trained models to store - -|`body` -|`object` - The trained model configuration - -|=== - -=== ml.revertModelSnapshot - -[source,ts] ----- -client.ml.revertModelSnapshot({ - job_id: string, - snapshot_id: string, - delete_intervening_results: boolean, - body: object -}) ----- -link:{ref}/ml-revert-snapshot.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to fetch - -|`snapshot_id` or `snapshotId` -|`string` - The ID of the snapshot to revert to - -|`delete_intervening_results` or `deleteInterveningResults` -|`boolean` - Should we reset the results back to the time of the snapshot? - -|`body` -|`object` - Reversion options - -|=== - -=== ml.setUpgradeMode - -[source,ts] ----- -client.ml.setUpgradeMode({ - enabled: boolean, - timeout: string -}) ----- -link:{ref}/ml-set-upgrade-mode.html[Documentation] + -[cols=2*] -|=== -|`enabled` -|`boolean` - Whether to enable upgrade_mode ML setting or not. Defaults to false. - -|`timeout` -|`string` - Controls the time to wait before action times out. 
Defaults to 30 seconds - -|=== - -=== ml.startDataFrameAnalytics -*Stability:* experimental -[source,ts] ----- -client.ml.startDataFrameAnalytics({ - id: string, - timeout: string, - body: object -}) ----- -link:{ref}/start-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to start - -|`timeout` -|`string` - Controls the time to wait until the task has started. Defaults to 20 seconds - -|`body` -|`object` - The start data frame analytics parameters - -|=== - -=== ml.startDatafeed - -[source,ts] ----- -client.ml.startDatafeed({ - datafeed_id: string, - start: string, - end: string, - timeout: string, - body: object -}) ----- -link:{ref}/ml-start-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeed to start - -|`start` -|`string` - The start time from where the datafeed should begin - -|`end` -|`string` - The end time when the datafeed should stop. When not set, the datafeed continues in real time - -|`timeout` -|`string` - Controls the time to wait until a datafeed has started. Default to 20 seconds - -|`body` -|`object` - The start datafeed parameters - -|=== - -=== ml.stopDataFrameAnalytics -*Stability:* experimental -[source,ts] ----- -client.ml.stopDataFrameAnalytics({ - id: string, - allow_no_match: boolean, - force: boolean, - timeout: string, - body: object -}) ----- -link:{ref}/stop-dfanalytics.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the data frame analytics to stop - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no data frame analytics. (This includes `_all` string or when no data frame analytics have been specified) - -|`force` -|`boolean` - True if the data frame analytics should be forcefully stopped - -|`timeout` -|`string` - Controls the time to wait until the task has stopped. Defaults to 20 seconds - -|`body` -|`object` - The stop data frame analytics parameters - -|=== - -=== ml.stopDatafeed - -[source,ts] ----- -client.ml.stopDatafeed({ - datafeed_id: string, - allow_no_datafeeds: boolean, - force: boolean, - timeout: string -}) ----- -link:{ref}/ml-stop-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeed to stop - -|`allow_no_datafeeds` or `allowNoDatafeeds` -|`boolean` - Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) - -|`force` -|`boolean` - True if the datafeed should be forcefully stopped. - -|`timeout` -|`string` - Controls the time to wait until a datafeed has stopped. 
Default to 20 seconds - -|=== - -=== ml.updateDatafeed - -[source,ts] ----- -client.ml.updateDatafeed({ - datafeed_id: string, - ignore_unavailable: boolean, - allow_no_indices: boolean, - ignore_throttled: boolean, - expand_wildcards: 'open' | 'closed' | 'hidden' | 'none' | 'all', - body: object -}) ----- -link:{ref}/ml-update-datafeed.html[Documentation] + -[cols=2*] -|=== -|`datafeed_id` or `datafeedId` -|`string` - The ID of the datafeed to update - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Ignore unavailable indexes (default: false) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Ignore if the source indices expressions resolves to no concrete indices (default: true) - -|`ignore_throttled` or `ignoreThrottled` -|`boolean` - Ignore indices that are marked as throttled (default: true) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'hidden' \| 'none' \| 'all'` - Whether source index expressions should get expanded to open or closed indices (default: open) - -|`body` -|`object` - The datafeed update settings - -|=== - -=== ml.updateFilter - -[source,ts] ----- -client.ml.updateFilter({ - filter_id: string, - body: object -}) ----- -link:{ref}/ml-update-filter.html[Documentation] + -[cols=2*] -|=== -|`filter_id` or `filterId` -|`string` - The ID of the filter to update - -|`body` -|`object` - The filter update - -|=== - -=== ml.updateJob - -[source,ts] ----- -client.ml.updateJob({ - job_id: string, - body: object -}) ----- -link:{ref}/ml-update-job.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to create - -|`body` -|`object` - The job update settings - -|=== - -=== ml.updateModelSnapshot - -[source,ts] ----- -client.ml.updateModelSnapshot({ - job_id: string, - snapshot_id: string, - body: object -}) ----- -link:{ref}/ml-update-snapshot.html[Documentation] + -[cols=2*] -|=== -|`job_id` or `jobId` -|`string` - The ID of the job to fetch - -|`snapshot_id` or `snapshotId` -|`string` - The ID of the snapshot to update - -|`body` -|`object` - The model snapshot properties to update - -|=== - -=== ml.validate - -[source,ts] ----- -client.ml.validate({ - body: object -}) ----- -link:https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The job config - -|=== - -=== ml.validateDetector - -[source,ts] ----- -client.ml.validateDetector({ - body: object -}) ----- -link:https://www.elastic.co/guide/en/machine-learning/current/ml-jobs.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The detector - -|=== - -=== monitoring.bulk -*Stability:* experimental -[source,ts] ----- -client.monitoring.bulk({ - type: string, - system_id: string, - system_api_version: string, - interval: string, - body: object -}) ----- -link:{ref}/monitor-elasticsearch-cluster.html[Documentation] + -[cols=2*] -|=== -|`type` -|`string` - Default document type for items which don't provide one + - -WARNING: This parameter has been deprecated. 
- -|`system_id` or `systemId` -|`string` - Identifier of the monitored system - -|`system_api_version` or `systemApiVersion` -|`string` - API Version of the monitored system - -|`interval` -|`string` - Collection interval (e.g., '10s' or '10000ms') of the payload - -|`body` -|`object` - The operation definition and data (action-data pairs), separated by newlines - -|=== - -=== rollup.deleteJob -*Stability:* experimental -[source,ts] ----- -client.rollup.deleteJob({ - id: string -}) ----- -link:{ref}/rollup-delete-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job to delete - -|=== - -=== rollup.getJobs -*Stability:* experimental -[source,ts] ----- -client.rollup.getJobs({ - id: string -}) ----- -link:{ref}/rollup-get-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job(s) to fetch. Accepts glob patterns, or left blank for all jobs - -|=== - -=== rollup.getRollupCaps -*Stability:* experimental -[source,ts] ----- -client.rollup.getRollupCaps({ - id: string -}) ----- -link:{ref}/rollup-get-rollup-caps.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the index to check rollup capabilities on, or left blank for all jobs - -|=== - -=== rollup.getRollupIndexCaps -*Stability:* experimental -[source,ts] ----- -client.rollup.getRollupIndexCaps({ - index: string -}) ----- -link:{ref}/rollup-get-rollup-index-caps.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string` - The rollup index or index pattern to obtain rollup capabilities from. - -|=== - -=== rollup.putJob -*Stability:* experimental -[source,ts] ----- -client.rollup.putJob({ - id: string, - body: object -}) ----- -link:{ref}/rollup-put-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job to create - -|`body` -|`object` - The job configuration - -|=== - -=== rollup.rollupSearch -*Stability:* experimental -[source,ts] ----- -client.rollup.rollupSearch({ - index: string | string[], - type: string, - typed_keys: boolean, - rest_total_hits_as_int: boolean, - body: object -}) ----- -link:{ref}/rollup-search.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - The indices or index-pattern(s) (containing rollup or regular data) that should be searched - -|`type` -|`string` - The doc type inside the index + - -WARNING: This parameter has been deprecated. - -|`typed_keys` or `typedKeys` -|`boolean` - Specify whether aggregation and suggester names should be prefixed by their respective types in the response - -|`rest_total_hits_as_int` or `restTotalHitsAsInt` -|`boolean` - Indicates whether hits.total should be rendered as an integer or an object in the rest search response - -|`body` -|`object` - The search request body - -|=== - -=== rollup.startJob -*Stability:* experimental -[source,ts] ----- -client.rollup.startJob({ - id: string -}) ----- -link:{ref}/rollup-start-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job to start - -|=== - -=== rollup.stopJob -*Stability:* experimental -[source,ts] ----- -client.rollup.stopJob({ - id: string, - wait_for_completion: boolean, - timeout: string -}) ----- -link:{ref}/rollup-stop-job.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - The ID of the job to stop - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - True if the API should block until the job has fully stopped, false if should be executed async. Defaults to false. - -|`timeout` -|`string` - Block for (at maximum) the specified duration while waiting for the job to stop. 
Defaults to 30s. - -|=== - -=== searchableSnapshots.clearCache -*Stability:* experimental -[source,ts] ----- -client.searchableSnapshots.clearCache({ - index: string | string[], - ignore_unavailable: boolean, - allow_no_indices: boolean, - expand_wildcards: 'open' | 'closed' | 'none' | 'all' -}) ----- -link:{ref}/searchable-snapshots-api-clear-cache.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names - -|`ignore_unavailable` or `ignoreUnavailable` -|`boolean` - Whether specified concrete indices should be ignored when unavailable (missing or closed) - -|`allow_no_indices` or `allowNoIndices` -|`boolean` - Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - -|`expand_wildcards` or `expandWildcards` -|`'open' \| 'closed' \| 'none' \| 'all'` - Whether to expand wildcard expression to concrete indices that are open, closed or both. + -_Default:_ `open` - -|=== - -=== searchableSnapshots.mount -*Stability:* experimental -[source,ts] ----- -client.searchableSnapshots.mount({ - repository: string, - snapshot: string, - master_timeout: string, - wait_for_completion: boolean, - body: object -}) ----- -link:{ref}/searchable-snapshots-api-mount-snapshot.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - The name of the repository containing the snapshot of the index to mount - -|`snapshot` -|`string` - The name of the snapshot of the index to mount - -|`master_timeout` or `masterTimeout` -|`string` - Explicit operation timeout for connection to master node - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Should this request wait until the operation has completed before returning - -|`body` -|`object` - The restore configuration for mounting the snapshot as searchable - -|=== - -=== searchableSnapshots.repositoryStats -*Stability:* experimental -[source,ts] ----- -client.searchableSnapshots.repositoryStats({ - repository: string -}) ----- -link:{ref}/searchable-snapshots-repository-stats.html[Documentation] + -[cols=2*] -|=== -|`repository` -|`string` - The repository for which to get the stats for - -|=== - -=== searchableSnapshots.stats -*Stability:* experimental -[source,ts] ----- -client.searchableSnapshots.stats({ - index: string | string[] -}) ----- -link:{ref}/searchable-snapshots-api-stats.html[Documentation] + -[cols=2*] -|=== -|`index` -|`string \| string[]` - A comma-separated list of index names - -|=== - -=== security.authenticate - -[source,ts] ----- -client.security.authenticate() ----- -link:{ref}/security-api-authenticate.html[Documentation] + - - -=== security.changePassword - -[source,ts] ----- -client.security.changePassword({ - username: string, - refresh: 'true' | 'false' | 'wait_for', - body: object -}) ----- -link:{ref}/security-api-change-password.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string` - The username of the user to change the password for - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
- -|`body` -|`object` - the new password for the user - -|=== - -=== security.clearCachedRealms - -[source,ts] ----- -client.security.clearCachedRealms({ - realms: string | string[], - usernames: string | string[] -}) ----- -link:{ref}/security-api-clear-cache.html[Documentation] + -[cols=2*] -|=== -|`realms` -|`string \| string[]` - Comma-separated list of realms to clear - -|`usernames` -|`string \| string[]` - Comma-separated list of usernames to clear from the cache - -|=== - -=== security.clearCachedRoles - -[source,ts] ----- -client.security.clearCachedRoles({ - name: string | string[] -}) ----- -link:{ref}/security-api-clear-role-cache.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - Role name - -|=== - -=== security.createApiKey - -[source,ts] ----- -client.security.createApiKey({ - refresh: 'true' | 'false' | 'wait_for', - body: object -}) ----- -link:{ref}/security-api-create-api-key.html[Documentation] + -[cols=2*] -|=== -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|`body` -|`object` - The api key request to create an API key - -|=== - -=== security.deletePrivileges - -[source,ts] ----- -client.security.deletePrivileges({ - application: string, - name: string, - refresh: 'true' | 'false' | 'wait_for' -}) ----- -link:{ref}/security-api-delete-privilege.html[Documentation] + -[cols=2*] -|=== -|`application` -|`string` - Application name - -|`name` -|`string` - Privilege name - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|=== - -=== security.deleteRole - -[source,ts] ----- -client.security.deleteRole({ - name: string, - refresh: 'true' | 'false' | 'wait_for' -}) ----- -link:{ref}/security-api-delete-role.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - Role name - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|=== - -=== security.deleteRoleMapping - -[source,ts] ----- -client.security.deleteRoleMapping({ - name: string, - refresh: 'true' | 'false' | 'wait_for' -}) ----- -link:{ref}/security-api-delete-role-mapping.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - Role-mapping name - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
- -|=== - -=== security.deleteUser - -[source,ts] ----- -client.security.deleteUser({ - username: string, - refresh: 'true' | 'false' | 'wait_for' -}) ----- -link:{ref}/security-api-delete-user.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string` - username - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|=== - -=== security.disableUser - -[source,ts] ----- -client.security.disableUser({ - username: string, - refresh: 'true' | 'false' | 'wait_for' -}) ----- -link:{ref}/security-api-disable-user.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string` - The username of the user to disable - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|=== - -=== security.enableUser - -[source,ts] ----- -client.security.enableUser({ - username: string, - refresh: 'true' | 'false' | 'wait_for' -}) ----- -link:{ref}/security-api-enable-user.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string` - The username of the user to enable - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|=== - -=== security.getApiKey - -[source,ts] ----- -client.security.getApiKey({ - id: string, - name: string, - username: string, - realm_name: string, - owner: boolean -}) ----- -link:{ref}/security-api-get-api-key.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - API key id of the API key to be retrieved - -|`name` -|`string` - API key name of the API key to be retrieved - -|`username` -|`string` - user name of the user who created this API key to be retrieved - -|`realm_name` or `realmName` -|`string` - realm name of the user who created this API key to be retrieved - -|`owner` -|`boolean` - flag to query API keys owned by the currently authenticated user - -|=== - -=== security.getBuiltinPrivileges - -[source,ts] ----- -client.security.getBuiltinPrivileges() ----- -link:{ref}/security-api-get-builtin-privileges.html[Documentation] + - - -=== security.getPrivileges - -[source,ts] ----- -client.security.getPrivileges({ - application: string, - name: string -}) ----- -link:{ref}/security-api-get-privileges.html[Documentation] + -[cols=2*] -|=== -|`application` -|`string` - Application name - -|`name` -|`string` - Privilege name - -|=== - -=== security.getRole - -[source,ts] ----- -client.security.getRole({ - name: string | string[] -}) ----- -link:{ref}/security-api-get-role.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of role names - -|=== - -=== security.getRoleMapping - -[source,ts] ----- -client.security.getRoleMapping({ - name: string | string[] -}) ----- -link:{ref}/security-api-get-role-mapping.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string \| string[]` - A comma-separated list of role-mapping names - -|=== - -=== security.getToken - -[source,ts] ----- -client.security.getToken({ - body: object -}) ----- -link:{ref}/security-api-get-token.html[Documentation] 
+ -[cols=2*] -|=== -|`body` -|`object` - The token request to get - -|=== - -=== security.getUser - -[source,ts] ----- -client.security.getUser({ - username: string | string[] -}) ----- -link:{ref}/security-api-get-user.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string \| string[]` - A comma-separated list of usernames - -|=== - -=== security.getUserPrivileges - -[source,ts] ----- -client.security.getUserPrivileges() ----- -link:{ref}/security-api-get-privileges.html[Documentation] + - - -=== security.hasPrivileges - -[source,ts] ----- -client.security.hasPrivileges({ - user: string, - body: object -}) ----- -link:{ref}/security-api-has-privileges.html[Documentation] + -[cols=2*] -|=== -|`user` -|`string` - Username - -|`body` -|`object` - The privileges to test - -|=== - -=== security.invalidateApiKey - -[source,ts] ----- -client.security.invalidateApiKey({ - body: object -}) ----- -link:{ref}/security-api-invalidate-api-key.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The api key request to invalidate API key(s) - -|=== - -=== security.invalidateToken - -[source,ts] ----- -client.security.invalidateToken({ - body: object -}) ----- -link:{ref}/security-api-invalidate-token.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The token to invalidate - -|=== - -=== security.putPrivileges - -[source,ts] ----- -client.security.putPrivileges({ - refresh: 'true' | 'false' | 'wait_for', - body: object -}) ----- -link:{ref}/security-api-put-privileges.html[Documentation] + -[cols=2*] -|=== -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|`body` -|`object` - The privilege(s) to add - -|=== - -=== security.putRole - -[source,ts] ----- -client.security.putRole({ - name: string, - refresh: 'true' | 'false' | 'wait_for', - body: object -}) ----- -link:{ref}/security-api-put-role.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - Role name - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|`body` -|`object` - The role to add - -|=== - -=== security.putRoleMapping - -[source,ts] ----- -client.security.putRoleMapping({ - name: string, - refresh: 'true' | 'false' | 'wait_for', - body: object -}) ----- -link:{ref}/security-api-put-role-mapping.html[Documentation] + -[cols=2*] -|=== -|`name` -|`string` - Role-mapping name - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
- -|`body` -|`object` - The role mapping to add - -|=== - -=== security.putUser - -[source,ts] ----- -client.security.putUser({ - username: string, - refresh: 'true' | 'false' | 'wait_for', - body: object -}) ----- -link:{ref}/security-api-put-user.html[Documentation] + -[cols=2*] -|=== -|`username` -|`string` - The username of the User - -|`refresh` -|`'true' \| 'false' \| 'wait_for'` - If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - -|`body` -|`object` - The user to add - -|=== - -=== slm.deleteLifecycle - -[source,ts] ----- -client.slm.deleteLifecycle({ - policy_id: string -}) ----- -link:{ref}/slm-api-delete-policy.html[Documentation] + -[cols=2*] -|=== -|`policy_id` or `policyId` -|`string` - The id of the snapshot lifecycle policy to remove - -|=== - -=== slm.executeLifecycle - -[source,ts] ----- -client.slm.executeLifecycle({ - policy_id: string -}) ----- -link:{ref}/slm-api-execute-lifecycle.html[Documentation] + -[cols=2*] -|=== -|`policy_id` or `policyId` -|`string` - The id of the snapshot lifecycle policy to be executed - -|=== - -=== slm.executeRetention - -[source,ts] ----- -client.slm.executeRetention() ----- -link:{ref}/slm-api-execute-retention.html[Documentation] + - - -=== slm.getLifecycle - -[source,ts] ----- -client.slm.getLifecycle({ - policy_id: string | string[] -}) ----- -link:{ref}/slm-api-get-policy.html[Documentation] + -[cols=2*] -|=== -|`policy_id` or `policyId` -|`string \| string[]` - Comma-separated list of snapshot lifecycle policies to retrieve - -|=== - -=== slm.getStats - -[source,ts] ----- -client.slm.getStats() ----- -link:{ref}/slm-api-get-stats.html[Documentation] + - - -=== slm.getStatus - -[source,ts] ----- -client.slm.getStatus() ----- -link:{ref}/slm-api-get-status.html[Documentation] + - - -=== slm.putLifecycle - -[source,ts] ----- -client.slm.putLifecycle({ - policy_id: string, - body: object -}) ----- -link:{ref}/slm-api-put-policy.html[Documentation] + -[cols=2*] -|=== -|`policy_id` or `policyId` -|`string` - The id of the snapshot lifecycle policy - -|`body` -|`object` - The snapshot lifecycle policy definition to register - -|=== - -=== slm.start - -[source,ts] ----- -client.slm.start() ----- -link:{ref}/slm-api-start.html[Documentation] + - - -=== slm.stop - -[source,ts] ----- -client.slm.stop() ----- -link:{ref}/slm-api-stop.html[Documentation] + - - -=== sql.clearCursor - -[source,ts] ----- -client.sql.clearCursor({ - body: object -}) ----- -link:{ref}/sql-pagination.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - Specify the cursor value in the `cursor` element to clean the cursor. - -|=== - -=== sql.query - -[source,ts] ----- -client.sql.query({ - format: string, - body: object -}) ----- -link:{ref}/sql-rest-overview.html[Documentation] + -{jsclient}/sql_query_examples.html[Code Example] + -[cols=2*] -|=== -|`format` -|`string` - a short version of the Accept header, e.g. json, yaml - -|`body` -|`object` - Use the `query` element to start a query. Use the `cursor` element to continue a query. - -|=== - -=== sql.translate - -[source,ts] ----- -client.sql.translate({ - body: object -}) ----- -link:{ref}/sql-translate.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - Specify the query in the `query` element. 
- -|=== - -=== ssl.certificates - -[source,ts] ----- -client.ssl.certificates() ----- -link:{ref}/security-api-ssl.html[Documentation] + - - -=== transform.deleteTransform - -[source,ts] ----- -client.transform.deleteTransform({ - transform_id: string, - force: boolean -}) ----- -link:{ref}/delete-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform to delete - -|`force` -|`boolean` - When `true`, the transform is deleted regardless of its current state. The default value is `false`, meaning that the transform must be `stopped` before it can be deleted. - -|=== - -=== transform.getTransform - -[source,ts] ----- -client.transform.getTransform({ - transform_id: string, - from: number, - size: number, - allow_no_match: boolean -}) ----- -link:{ref}/get-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id or comma delimited list of id expressions of the transforms to get, '_all' or '*' implies get all transforms - -|`from` -|`number` - skips a number of transform configs, defaults to 0 - -|`size` -|`number` - specifies a max number of transforms to get, defaults to 100 - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|=== - -=== transform.getTransformStats - -[source,ts] ----- -client.transform.getTransformStats({ - transform_id: string, - from: number, - size: number, - allow_no_match: boolean -}) ----- -link:{ref}/get-transform-stats.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform for which to get stats. '_all' or '*' implies all transforms - -|`from` -|`number` - skips a number of transform stats, defaults to 0 - -|`size` -|`number` - specifies a max number of transform stats to get, defaults to 100 - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|=== - -=== transform.previewTransform - -[source,ts] ----- -client.transform.previewTransform({ - body: object -}) ----- -link:{ref}/preview-transform.html[Documentation] + -[cols=2*] -|=== -|`body` -|`object` - The definition for the transform to preview - -|=== - -=== transform.putTransform - -[source,ts] ----- -client.transform.putTransform({ - transform_id: string, - defer_validation: boolean, - body: object -}) ----- -link:{ref}/put-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the new transform. - -|`defer_validation` or `deferValidation` -|`boolean` - If validations should be deferred until transform starts, defaults to false. 
- -|`body` -|`object` - The transform definition - -|=== - -=== transform.startTransform - -[source,ts] ----- -client.transform.startTransform({ - transform_id: string, - timeout: string -}) ----- -link:{ref}/start-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform to start - -|`timeout` -|`string` - Controls the time to wait for the transform to start - -|=== - -=== transform.stopTransform - -[source,ts] ----- -client.transform.stopTransform({ - transform_id: string, - force: boolean, - wait_for_completion: boolean, - timeout: string, - allow_no_match: boolean, - wait_for_checkpoint: boolean -}) ----- -link:{ref}/stop-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform to stop - -|`force` -|`boolean` - Whether to force stop a failed transform or not. Default to false - -|`wait_for_completion` or `waitForCompletion` -|`boolean` - Whether to wait for the transform to fully stop before returning or not. Default to false - -|`timeout` -|`string` - Controls the time to wait until the transform has stopped. Default to 30 seconds - -|`allow_no_match` or `allowNoMatch` -|`boolean` - Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified) - -|`wait_for_checkpoint` or `waitForCheckpoint` -|`boolean` - Whether to wait for the transform to reach a checkpoint before stopping. Default to false - -|=== - -=== transform.updateTransform - -[source,ts] ----- -client.transform.updateTransform({ - transform_id: string, - defer_validation: boolean, - body: object -}) ----- -link:{ref}/update-transform.html[Documentation] + -[cols=2*] -|=== -|`transform_id` or `transformId` -|`string` - The id of the transform. - -|`defer_validation` or `deferValidation` -|`boolean` - If validations should be deferred until transform starts, defaults to false. 
- -|`body` -|`object` - The update transform definition - -|=== - -=== watcher.ackWatch - -[source,ts] ----- -client.watcher.ackWatch({ - watch_id: string, - action_id: string | string[] -}) ----- -link:{ref}/watcher-api-ack-watch.html[Documentation] + -[cols=2*] -|=== -|`watch_id` or `watchId` -|`string` - Watch ID - -|`action_id` or `actionId` -|`string \| string[]` - A comma-separated list of the action ids to be acked - -|=== - -=== watcher.activateWatch - -[source,ts] ----- -client.watcher.activateWatch({ - watch_id: string -}) ----- -link:{ref}/watcher-api-activate-watch.html[Documentation] + -[cols=2*] -|=== -|`watch_id` or `watchId` -|`string` - Watch ID - -|=== - -=== watcher.deactivateWatch - -[source,ts] ----- -client.watcher.deactivateWatch({ - watch_id: string -}) ----- -link:{ref}/watcher-api-deactivate-watch.html[Documentation] + -[cols=2*] -|=== -|`watch_id` or `watchId` -|`string` - Watch ID - -|=== - -=== watcher.deleteWatch - -[source,ts] ----- -client.watcher.deleteWatch({ - id: string -}) ----- -link:{ref}/watcher-api-delete-watch.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Watch ID - -|=== - -=== watcher.executeWatch - -[source,ts] ----- -client.watcher.executeWatch({ - id: string, - debug: boolean, - body: object -}) ----- -link:{ref}/watcher-api-execute-watch.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Watch ID - -|`debug` -|`boolean` - indicates whether the watch should execute in debug mode - -|`body` -|`object` - Execution control - -|=== - -=== watcher.getWatch - -[source,ts] ----- -client.watcher.getWatch({ - id: string -}) ----- -link:{ref}/watcher-api-get-watch.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Watch ID - -|=== - -=== watcher.putWatch - -[source,ts] ----- -client.watcher.putWatch({ - id: string, - active: boolean, - version: number, - if_seq_no: number, - if_primary_term: number, - body: object -}) ----- -link:{ref}/watcher-api-put-watch.html[Documentation] + -[cols=2*] -|=== -|`id` -|`string` - Watch ID - -|`active` -|`boolean` - Specify whether the watch is in/active by default - -|`version` -|`number` - Explicit version number for concurrency control - -|`if_seq_no` or `ifSeqNo` -|`number` - only update the watch if the last operation that has changed the watch has the specified sequence number - -|`if_primary_term` or `ifPrimaryTerm` -|`number` - only update the watch if the last operation that has changed the watch has the specified primary term - -|`body` -|`object` - The watch - -|=== - -=== watcher.start - -[source,ts] ----- -client.watcher.start() ----- -link:{ref}/watcher-api-start.html[Documentation] + - - -=== watcher.stats - -[source,ts] ----- -client.watcher.stats({ - metric: string | string[], - emit_stacktraces: boolean -}) ----- -link:{ref}/watcher-api-stats.html[Documentation] + -[cols=2*] -|=== -|`metric` -|`string \| string[]` - Controls what additional stat metrics should be include in the response - -|`emit_stacktraces` or `emitStacktraces` -|`boolean` - Emits stack traces of currently running watches - -|=== - -=== watcher.stop - -[source,ts] ----- -client.watcher.stop() ----- -link:{ref}/watcher-api-stop.html[Documentation] + - - -=== xpack.info - -[source,ts] ----- -client.xpack.info({ - categories: string | string[] -}) ----- -link:{ref}/info-api.html[Documentation] + -[cols=2*] -|=== -|`categories` -|`string \| string[]` - Comma-separated list of info categories. 
Can be any of: build, license, features - -|=== - -=== xpack.usage - -[source,ts] ---- -client.xpack.usage({ - master_timeout: string -}) ---- -link:{ref}/usage-api.html[Documentation] + -[cols=2*] -|=== -|`master_timeout` or `masterTimeout` -|`string` - Specify timeout for watch write operation - -|===
diff --git a/docs/reference/advanced-config.md b/docs/reference/advanced-config.md
new file mode 100644
index 000000000..435a09c1d
--- /dev/null
+++ b/docs/reference/advanced-config.md
@@ -0,0 +1,183 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/advanced-config.html
+---
+
+# Advanced configuration [advanced-config]
+
+If you need to customize the client behavior heavily, you are in the right place! The client enables you to customize the following internals:
+
+* `ConnectionPool` class
+* `Connection` class
+* `Serializer` class
+
+::::{note}
+For information about the `Transport` class, refer to [Transport](/reference/transport.md).
+::::
+
+## `ConnectionPool` [_connectionpool]
+
+This class is responsible for keeping in memory all the {{es}} connections that you are using. There is a single `Connection` for every node. The connection pool handles the resurrection strategies and the updates of the pool.
+
+```js
+const { Client, ConnectionPool } = require('@elastic/elasticsearch')
+
+class MyConnectionPool extends ConnectionPool {
+  markAlive (connection) {
+    // your code
+    super.markAlive(connection)
+  }
+}
+
+const client = new Client({
+  ConnectionPool: MyConnectionPool,
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+```
+
+## `Connection` [_connection]
+
+This class represents a single node; it holds all the information we have about the node, such as roles, id, URL, custom headers, and so on. The actual HTTP request is performed here, which means that if you want to swap the default HTTP client ([Undici `Pool`](https://undici.nodejs.org/#/docs/api/Pool.md)), you should override the `request` method of this class.
+
+```js
+const { Client, BaseConnection } = require('@elastic/elasticsearch')
+
+class MyConnection extends BaseConnection {
+  request (params, callback) {
+    // your code
+  }
+}
+
+const client = new Client({
+  Connection: MyConnection,
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+```
+
+`@elastic/transport` provides two `Connection` implementations:
+
+- `UndiciConnection`: manages HTTP connections using [Undici](https://undici.nodejs.org/), Node.js's high-performance HTTP client implementation; this is the default value of `Connection` and is recommended unless you have a use case that is not yet supported by Undici or `UndiciConnection`
+- `HttpConnection`: manages HTTP connections using [the `http` package](https://nodejs.org/api/http.html) from Node.js's standard library
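+
+For example, to opt in to `HttpConnection` instead of the default, pass it as the `Connection` option. A minimal sketch, assuming `@elastic/transport` is installed as the client's transport dependency; the node URL is a placeholder:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const { HttpConnection } = require('@elastic/transport')
+
+const client = new Client({
+  node: 'http://localhost:9200', // placeholder address
+  Connection: HttpConnection
+})
+```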
+
+## `Serializer` [_serializer]
+
+This class is responsible for the serialization of every request; it offers the following methods:
+
+* `serialize(object: any): string;` serializes request objects.
+* `deserialize(json: string): any;` deserializes response strings.
+* `ndserialize(array: any[]): string;` serializes bulk request objects.
+* `qserialize(object: any): string;` serializes request query parameters.
+
+```js
+const { Client, Serializer } = require('@elastic/elasticsearch')
+
+class MySerializer extends Serializer {
+  serialize (object) {
+    // your code
+  }
+}
+
+const client = new Client({
+  Serializer: MySerializer,
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+```
+
+## Redaction of potentially sensitive data [redaction]
+
+When the client raises an `Error` that originated at the HTTP layer, like a `ConnectionError` or `TimeoutError`, a `meta` object is often attached to the error object that includes metadata useful for debugging, like request and response information. Because this can include potentially sensitive data, like authentication secrets in an `Authorization` header, the client takes measures to redact common sources of sensitive data when this metadata is attached and serialized.
+
+If your configuration requires extra headers or other configurations that may include sensitive data, you may want to adjust these settings to account for that.
+
+By default, the `redaction` option is set to `{ type: 'replace' }`, which recursively searches for sensitive key names, case insensitive, and replaces their values with the string `[redacted]`.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' },
+})
+
+try {
+  await client.indices.create({ index: 'my_index' })
+} catch (err) {
+  console.log(err.meta.meta.request.options.headers.authorization) // prints "[redacted]"
+}
+```
+
+If you would like to redact additional properties, you can include additional key names to search and replace:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' },
+  headers: { 'X-My-Secret-Password': "shhh it's a secret!" },
+  redaction: {
+    type: "replace",
+    additionalKeys: ["x-my-secret-password"]
+  }
+})
+
+try {
+  await client.indices.create({ index: 'my_index' })
+} catch (err) {
+  console.log(err.meta.meta.request.options.headers['X-My-Secret-Password']) // prints "[redacted]"
+}
+```
+
+Alternatively, if you know you’re not going to use the metadata at all, setting the redaction type to `remove` will strip all optional sources of potentially sensitive data entirely, replacing required properties with `null`.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' },
+  redaction: { type: "remove" }
+})
+
+try {
+  await client.indices.create({ index: 'my_index' })
+} catch (err) {
+  console.log(err.meta.meta.request.options.headers) // undefined
+}
+```
+
+Finally, if you prefer to turn off redaction altogether, perhaps while debugging in a local development environment, you can set the redaction type to `off`. This will revert the client to pre-8.11.0 behavior, where basic redaction is only performed during common serialization methods like `console.log` and `JSON.stringify`.
+
+::::{warning}
+Setting `redaction.type` to `off` is not recommended in production environments.
+::::
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' },
+  redaction: { type: "off" }
+})
+
+try {
+  await client.indices.create({ index: 'my_index' })
+} catch (err) {
+  console.log(err.meta.meta.request.options.headers.authorization) // the actual header value will be logged
+}
+```
+
+## Migrate to v8 [_migrate_to_v8]
+
+The Node.js client can be configured to emit an HTTP header `Accept: application/vnd.elasticsearch+json; compatible-with=7` which signals to {{es}} that the client is requesting the `7.x` version of request and response bodies. This allows for upgrading from 7.x to 8.x versions of {{es}} without upgrading everything at once. Once the compatibility header is configured, {{es}} should be upgraded first, and clients should be upgraded second. To enable this setting, configure the environment variable `ELASTIC_CLIENT_APIVERSIONING` to `true`.
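+
+A minimal sketch, assuming the variable is set before the client is instantiated; the node URL is a placeholder:
+
+```js
+// could also be set in the shell instead: ELASTIC_CLIENT_APIVERSIONING=true node app.js
+process.env.ELASTIC_CLIENT_APIVERSIONING = 'true'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // placeholder address
+// requests now carry: Accept: application/vnd.elasticsearch+json; compatible-with=7
+```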
+
diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md
new file mode 100644
index 000000000..f04242f1b
--- /dev/null
+++ b/docs/reference/api-reference.md
@@ -0,0 +1,15801 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html
+comment: |
+  IMPORTANT: This file is autogenerated, DO NOT send pull requests that change this file directly.
+  You should update the script that does the generation, which can be found in:
+  https://github.com/elastic/elastic-client-generator-js
+---
+
+# API Reference [api-reference]
+
+## client.bulk [_bulk]
+Bulk index or delete documents.
+Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.
+This reduces overhead and can greatly increase indexing speed.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
+
+* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.
+* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.
+* To use the `delete` action, you must have the `delete` or `write` index privilege.
+* To use the `update` action, you must have the `index` or `write` index privilege.
+* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
+* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.
+
+Automatic data stream creation requires a matching index template with data stream enabled.
+
+The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:
+
+```
+action_and_meta_data\n
+optional_source\n
+action_and_meta_data\n
+optional_source\n
+....
+action_and_meta_data\n
+optional_source\n
+```
+
+The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.
+A `create` action fails if a document with the same ID already exists in the target.
+An `index` action adds or replaces a document as necessary.
+
+NOTE: Data streams support only the `create` action.
+To update or delete a document in a data stream, you must target the backing index containing the document.
+
+An `update` action expects the partial doc, upsert, or script and its options to be specified on the next line.
+
+A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.
+
+NOTE: The final line of data must end with a newline character (`\n`).
+Each newline character may be preceded by a carriage return (`\r`).
+When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.
+Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.
+
+If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.
+
+A note on the format: the idea here is to make processing as fast as possible.
+As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.
+
+Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible.
+
+There is no "correct" number of actions to perform in a single bulk request.
+Experiment with different settings to find the optimal size for your particular workload.
+Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default, so clients must ensure that no request exceeds this size.
+It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.
+For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.
+
+**Client support for bulk requests**
+
+Some of the officially supported clients provide helpers to assist with bulk requests and reindexing:
+
+* Go: Check out `esutil.BulkIndexer`
+* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`
+* Python: Check out `elasticsearch.helpers.*`
+* JavaScript: Check out `client.helpers.*`
+* .NET: Check out `BulkAllObservable`
+* PHP: Check out bulk indexing.
+* Ruby: Check out `Elasticsearch::Helpers::BulkHelper`
+
+**Submitting bulk requests with cURL**
+
+If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.
+The latter doesn't preserve newlines. For example:
+
+```
+$ cat requests
+{ "index" : { "_index" : "test", "_id" : "1" } }
+{ "field1" : "value1" }
+$ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+{"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+```
+
+**Optimistic concurrency control**
+
+Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.
+The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.
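+
+With the JavaScript client, the action and source lines map onto the `operations` array. A minimal sketch; the index name, document ID, and sequence values are hypothetical:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({ node: 'http://localhost:9200' }) // placeholder address
+
+// only apply the index action if the document is still at seq_no 5, primary term 1
+const result = await client.bulk({
+  operations: [
+    { index: { _index: 'my-index', _id: '1', if_seq_no: 5, if_primary_term: 1 } },
+    { field1: 'updated value' }
+  ]
+})
+if (result.errors) {
+  // inspect result.items for per-action failures such as version conflicts
+}
+```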
+It also supports the `version_type`.
+
+**Routing**
+
+Each bulk item can include the routing value using the `routing` field.
+It automatically follows the behavior of the index or delete operation based on the `_routing` mapping.
+
+NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
+
+**Wait for active shards**
+
+When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.
+
+**Refresh**
+
+Control when the changes made by this request are visible to search.
+
+NOTE: Only the shards that receive the bulk request will be affected by refresh.
+Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.
+The request will only wait for those three shards to refresh.
+The other two shards that make up the index do not participate in the `_bulk` request at all.
+
+You might want to disable the refresh interval temporarily to improve indexing throughput for large bulk requests.
+Refer to the linked documentation for step-by-step instructions using the index settings API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk)
+
+```ts
+client.bulk({ ... })
+```
+### Arguments [_arguments_bulk]
+
+#### Request (object) [_request_bulk]
+
+- **`index` (Optional, string)**: The name of the data stream, index, or index alias to perform bulk actions on.
+- **`operations` (Optional, { index, create, update, delete } \| { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } \| object[])**
+- **`include_source_on_error` (Optional, boolean)**: Whether to include the document source in the error message in case of parsing errors.
+- **`list_executed_pipelines` (Optional, boolean)**: If `true`, the response will include the ingest pipelines that were run for each index or create.
+- **`pipeline` (Optional, string)**: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`.
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return.
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter.
If the `_source` parameter is `false`, this parameter is ignored. +- **`timeout` (Optional, string \| -1 \| 0)**: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. +- **`require_alias` (Optional, boolean)**: If `true`, the request's actions must target an index alias. +- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created). + +## client.clearScroll [_clear_scroll] +Clear a scrolling search. +Clear the search context and results for a scrolling search. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll) + +```ts +client.clearScroll({ ... }) +``` +### Arguments [_arguments_clear_scroll] + +#### Request (object) [_request_clear_scroll] + +- **`scroll_id` (Optional, string \| string[])**: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. + +## client.closePointInTime [_close_point_in_time] +Close a point in time. +A point in time must be opened explicitly before being used in search requests. +The `keep_alive` parameter tells Elasticsearch how long it should persist. +A point in time is automatically closed when the `keep_alive` period has elapsed. +However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time) + +```ts +client.closePointInTime({ id }) +``` +### Arguments [_arguments_close_point_in_time] + +#### Request (object) [_request_close_point_in_time] + +- **`id` (string)**: The ID of the point-in-time. + +## client.count [_count] +Count search results. +Get the number of documents matching a query. + +The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. +The query is optional. When no query is provided, the API uses `match_all` to count all the documents. + +The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. + +The operation is broadcast across all shards. +For each shard ID group, a replica is chosen and the search is run against it. +This means that replicas increase the scalability of the count. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count) + +```ts +client.count({ ... }) +``` +### Arguments [_arguments_count] + +#### Request (object) [_request_count] + +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
+- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified.
+- **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
+- **`min_score` (Optional, number)**: The minimum `_score` value that documents must have to be included in the result.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded). Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+- **`q` (Optional, string)**: The query in Lucene query string syntax. This parameter cannot be used with a request body.
+
+## client.create [_create]
+Create a new document in the index.
+
+You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs.
+Using `_create` guarantees that the document is indexed only if it does not already exist.
+It returns a 409 response when a document with the same ID already exists in the index.
+To update an existing document, you must use the `/<target>/_doc/` API.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
+
+* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.
+* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
+
+Automatic data stream creation requires a matching index template with data stream enabled.
+
+**Automatically create data streams and indices**
+
+If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
+
+If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.
+
+NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
+
+If no mapping exists, the index operation creates a dynamic mapping.
+By default, new fields and objects are automatically added to the mapping if needed.
+
+Automatic index creation is controlled by the `action.auto_create_index` setting.
+If it is `true`, any index can be created automatically.
+You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.
+Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+When a list is specified, the default behavior is to disallow.
+
+NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.
+It does not affect the creation of data streams.
+
+**Routing**
+
+By default, shard placement — or routing — is controlled by using a hash of the document's ID value.
+For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.
+
+When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.
+This does come at the (very minimal) cost of an additional document parsing pass.
+If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.
+
+NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
+
+**Distributed**
+
+The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
+After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.
+
+**Active shards**
+
+To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
+If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
+By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
+This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
+To alter this behavior per operation, use the `wait_for_active_shards` request parameter.
+
+Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
+Specifying a negative value or a number greater than the number of shard copies will throw an error.
+
+For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
+If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
+This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
+If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
+This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.
+The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.
+
+It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.
+After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.
+The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create)
+
+```ts
+client.create({ id, index })
+```
+### Arguments [_arguments_create]
+
+#### Request (object) [_request_create]
+
+- **`id` (string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format.
+- **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index.
+- **`document` (Optional, object)**: A document.
+- **`include_source_on_error` (Optional, boolean)**: Whether to include the document source in the error message in case of parsing errors.
+- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created).
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur.
+- **`version` (Optional, number)**: The explicit version number for concurrency control. It must be a non-negative long number.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
+
+## client.delete [_delete]
+Delete a document.
+
+Remove a JSON document from the specified index.
+
+NOTE: You cannot send deletion requests directly to a data stream.
+To delete a document in a data stream, you must target the backing index containing the document.
+
+**Optimistic concurrency control**
+
+Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.
+If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. + +**Versioning** + +Each document indexed is versioned. +When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. +Every write operation run on a document, deletes included, causes its version to be incremented. +The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. +The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. + +**Routing** + +If routing is used during indexing, the routing value also needs to be specified to delete a document. + +If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. + +For example: + +``` +DELETE /my-index-000001/_doc/1?routing=shard-1 +``` + +This request deletes the document with ID 1, but it is routed based on the user. +The document is not deleted if the correct routing is not specified. + +**Distributed** + +The delete operation gets hashed into a specific shard ID. +It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete) + +```ts +client.delete({ id, index }) +``` +### Arguments [_arguments_delete] + +#### Request (object) [_request_delete] + +- **`id` (string)**: A unique identifier for the document. +- **`index` (string)**: The name of the target index. +- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. +- **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. 
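+
+As a short sketch of the conditional delete described above (the index name, ID, routing, and sequence values are hypothetical; `if_seq_no` and `if_primary_term` would come from a previous read of the document):
+
+```ts
+try {
+  await client.delete({
+    index: 'my-index-000001',
+    id: '1',
+    routing: 'shard-1', // required if the `_routing` mapping is set to required
+    if_seq_no: 3,
+    if_primary_term: 1
+  })
+} catch (err) {
+  // a version conflict is reported as a 409 response
+  console.log(err.meta.statusCode) // 409
+}
+```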
+
+## client.deleteByQuery [_delete_by_query]
+Delete documents.
+
+Deletes documents that match the specified query.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:
+
+* `read`
+* `delete` or `write`
+
+You can specify the query criteria in the request URI or the request body using the same syntax as the search API.
+When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.
+If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.
+
+NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.
+
+While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.
+A bulk delete request is performed for each batch of matching documents.
+If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential backoff.
+If the maximum retry limit is reached, processing halts and all failed requests are returned in the response.
+Any delete requests that completed successfully still stick; they are not rolled back.
+
+You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
+Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query.
+
+**Throttling delete requests**
+
+To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number.
+This pads each batch with a wait time to throttle the rate.
+Set `requests_per_second` to `-1` to disable throttling.
+
+Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
+The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
+By default, the batch size is `1000`, so if `requests_per_second` is set to `500`:
+
+```
+target_time = 1000 / 500 per second = 2 seconds
+wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+```
+
+Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
+This is "bursty" instead of "smooth".
+
+**Slicing**
+
+Delete by query supports sliced scroll to parallelize the delete process.
+This can improve efficiency and provide a convenient way to break the request down into smaller parts.
+
+Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use.
+This setting will use one slice per shard, up to a certain limit.
+If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+Adding slices to the delete by query operation creates sub-requests which means it has some quirks:
+
+* You can see these requests in the tasks APIs.
These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with slices only contains the status of completed slices. +* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. +* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. + +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: + +* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. +* Delete performance scales linearly across available resources with the number of slices. + +Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Cancel a delete by query operation** + +Any delete by query can be canceled using the task cancel API. For example: + +``` +POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +``` + +The task ID can be found by using the get tasks API. + +Cancellation should happen quickly but might take a few seconds. +The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query) + +```ts +client.deleteByQuery({ index }) +``` +### Arguments [_arguments_delete_by_query] + +#### Request (object) [_request_delete_by_query] + +- **`index` (string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`max_docs` (Optional, number)**: The maximum number of documents to delete. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to delete specified with Query DSL. +- **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: A sort object that specifies the order of deleted documents. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`conflicts` (Optional, Enum("abort" \| "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`from` (Optional, number)**: Skips the specified number of documents. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. 
This is different from the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`.
+- **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. Defaults to the index-level setting.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`q` (Optional, string)**: A query in the Lucene query string syntax.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling.
+- **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation.
+- **`search_timeout` (Optional, string \| -1 \| 0)**: The explicit timeout for each search request. It defaults to no timeout.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
+- **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into.
+- **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period each deletion request waits for active shards.
+- **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.
+
+## client.deleteByQueryRethrottle [_delete_by_query_rethrottle]
+Throttle a delete by query operation.
+
+Change the number of requests per second for a particular delete by query operation.
+Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
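+
+As a minimal sketch, removing the throttle from a running operation (using the hypothetical task ID from the cancellation example above):
+
+```ts
+await client.deleteByQueryRethrottle({
+  task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619',
+  requests_per_second: -1 // `-1` disables throttling
+})
+```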
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle) + +```ts +client.deleteByQueryRethrottle({ task_id }) +``` +### Arguments [_arguments_delete_by_query_rethrottle] + +#### Request (object) [_request_delete_by_query_rethrottle] + +- **`task_id` (string)**: The ID for the task. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. + +## client.deleteScript [_delete_script] +Delete a script or search template. +Deletes a stored script or search template. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script) + +```ts +client.deleteScript({ id }) +``` +### Arguments [_arguments_delete_script] + +#### Request (object) [_request_delete_script] + +- **`id` (string)**: The identifier for the stored script or search template. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. + +## client.exists [_exists] +Check a document. + +Verify that a document exists. +For example, check to see if a document with the `_id` 0 exists: + +``` +HEAD my-index-000001/_doc/0 +``` + +If the document exists, the API returns a status code of `200 - OK`. +If the document doesn’t exist, the API returns `404 - Not Found`. + +**Versioning support** + +You can use the `version` parameter to check the document only if its current version is equal to the specified one. + +Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. +The old version of the document doesn't disappear immediately, although you won't be able to access it. +Elasticsearch cleans up deleted documents in the background as you continue to index more data. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) + +```ts +client.exists({ id, index }) +``` +### Arguments [_arguments_exists] + +#### Request (object) [_request_exists] + +- **`id` (string)**: A unique document identifier. +- **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. 
Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. +- **`version` (Optional, number)**: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. + +## client.existsSource [_exists_source] +Check for a document source. + +Check whether a document source exists in an index. +For example: + +``` +HEAD my-index-000001/_source/1 +``` + +A document's source is not available if it is disabled in the mapping. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) + +```ts +client.existsSource({ id, index }) +``` +### Arguments [_arguments_exists_source] + +#### Request (object) [_request_exists_source] + +- **`id` (string)**: A unique identifier for the document. +- **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude in the response. +- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. +- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type. 
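+
+As a sketch, both existence checks map to `HEAD` requests and resolve to a boolean in the JavaScript client (index name and IDs are the hypothetical ones from the examples above):
+
+```ts
+// HEAD my-index-000001/_doc/0
+const docExists = await client.exists({ index: 'my-index-000001', id: '0' })
+
+// HEAD my-index-000001/_source/1
+const sourceExists = await client.existsSource({ index: 'my-index-000001', id: '1' })
+```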
+
+## client.explain [_explain]
+Explain a document match result.
+Get information about why a specific document matches, or doesn't match, a query.
+It computes a score explanation for a query and a specific document.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain)
+
+```ts
+client.explain({ id, index })
+```
+### Arguments [_arguments_explain]
+
+#### Request (object) [_request_explain]
+
+- **`id` (string)**: The document identifier.
+- **`index` (string)**: Index names that are used to limit the request. Only a single index name can be provided to this parameter.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL.
+- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
+- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified.
+- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`_source` (Optional, boolean \| string \| string[])**: `true` or `false` to indicate whether to return the `_source` field, or a list of fields to return.
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return in the response.
+- **`q` (Optional, string)**: The query in the Lucene query string syntax.
+
+## client.fieldCaps [_field_caps]
+Get the field capabilities.
+
+Get information about the capabilities of fields among multiple indices.
+
+For data streams, the API returns field capabilities among the stream’s backing indices.
+It returns runtime fields like any other field.
+For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps)
+
+```ts
+client.fieldCaps({ ... })
+```
+### Arguments [_arguments_field_caps]
+
+#### Request (object) [_request_field_caps]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`fields` (Optional, string \| string[])**: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.
+- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis; it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance, a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none`, so this API may return an index even if the provided filter matches no document.
+- **`runtime_mappings` (Optional, Record)**: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response.
+- **`include_unmapped` (Optional, boolean)**: If `true`, unmapped fields are included in the response.
+- **`filters` (Optional, string \| string[])**: A list of filters to apply to the response.
+- **`types` (Optional, string[])**: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned.
+- **`include_empty_fields` (Optional, boolean)**: If `false`, empty fields are not included in the response.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the field-caps query using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded). Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
+
+## client.get [_get]
+Get a document by its ID.
+
+Get a document and its source or stored fields from an index.
+
+By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search).
+In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields.
+To turn off realtime behavior, set the `realtime` parameter to `false`.
+
+**Source filtering**
+
+By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off.
+You can turn off `_source` retrieval by using the `_source` parameter:
+
+```
+GET my-index-000001/_doc/0?_source=false
+```
+
+If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields.
+This can be helpful with large documents where partial retrieval can save on network overhead.
+Both parameters take a comma-separated list of fields or wildcard expressions.
+For example:
+
+```
+GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+```
+
+If you only want to specify includes, you can use a shorter notation:
+
+```
+GET my-index-000001/_doc/0?_source=*.id
+```
+
+**Routing**
+
+If routing is used during indexing, the routing value also needs to be specified to retrieve a document.
+For example:
+
+```
+GET my-index-000001/_doc/2?routing=user1
+```
+
+This request gets the document with ID 2, but it is routed based on the user.
+The document is not fetched if the correct routing is not specified.
+
+**Distributed**
+
+The GET operation is hashed into a specific shard ID.
+It is then redirected to one of the replicas within that shard ID and returns the result.
+The replicas are the primary shard and its replicas within that shard ID group.
+This means that the more replicas you have, the better your GET scaling will be.
+
+**Versioning support**
+
+You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one.
+
+Internally, Elasticsearch has marked the old document as deleted and added an entirely new document.
+The old version of the document doesn't disappear immediately, although you won't be able to access it.
+Elasticsearch cleans up deleted documents in the background as you continue to index more data.
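+
+A short sketch of the source filtering described above, using the same hypothetical index and document ID as the URI examples:
+
+```ts
+// GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+const result = await client.get({
+  index: 'my-index-000001',
+  id: '0',
+  _source_includes: '*.id',
+  _source_excludes: 'entities'
+})
+console.log(result._source) // the filtered source
+```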
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get)
+
+```ts
+client.get({ id, index })
+```
+### Arguments [_arguments_get]
+
+#### Request (object) [_request_get]
+
+- **`id` (string)**: A unique document identifier.
+- **`index` (string)**: The name of the index that contains the document.
+- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
+- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
+- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_exclude_vectors` (Optional, boolean)**: Whether vectors should be excluded from `_source`.
+- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails.
+- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type.
+
+## client.getScript [_get_script]
+Get a script or search template.
+Retrieves a stored script or search template.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script)
+
+```ts
+client.getScript({ id })
+```
+### Arguments [_arguments_get_script]
+
+#### Request (object) [_request_get_script]
+
+- **`id` (string)**: The identifier for the stored script or search template.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
+
+## client.getScriptContext [_get_script_context]
+Get script contexts.
+
+Get a list of supported script contexts and their methods.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context)
+
+```ts
+client.getScriptContext()
+```
+
+## client.getScriptLanguages [_get_script_languages]
+Get script languages.
+
+Get a list of available script types, languages, and contexts.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages)
+
+```ts
+client.getScriptLanguages()
+```
+
+## client.getSource [_get_source]
+Get a document's source.
+
+Get the source of a document.
+For example:
+
+```
+GET my-index-000001/_source/1
+```
+
+You can use the source filtering parameters to control which parts of the `_source` are returned:
+
+```
+GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+```
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get)
+
+```ts
+client.getSource({ id, index })
+```
+### Arguments [_arguments_get_source]
+
+#### Request (object) [_request_get_source]
+
+- **`id` (string)**: A unique document identifier.
+- **`index` (string)**: The name of the index that contains the document.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas.
+- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
+- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`_source` (Optional, boolean \| string \| string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response.
+- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response.
+- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type.
+
+## client.healthReport [_health_report]
+Get the cluster health.
+Get a report with the health status of an Elasticsearch cluster.
+The report contains a list of indicators that compose Elasticsearch functionality.
+
+Each indicator has a health status of: green, unknown, yellow, or red.
+The indicator will provide an explanation and metadata describing the reason for its current health status.
+
+The cluster’s status is controlled by the worst indicator status.
+
+In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue.
+Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system.
+
+Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system.
+The root cause and remediation steps are encapsulated in a diagnosis.
+A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem.
+
+NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently.
+When setting up automated polling of the API for health status, set `verbose` to `false` to disable the more expensive analysis logic.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report)
+
+```ts
+client.healthReport({ ... })
+```
+### Arguments [_arguments_health_report]
+
+#### Request (object) [_request_health_report]
+
+- **`feature` (Optional, string \| string[])**: A feature of the cluster, as returned by the top-level health report API.
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout.
+- **`verbose` (Optional, boolean)**: Opt-in for more information about the health of the system.
+- **`size` (Optional, number)**: Limit the number of affected resources the health report API returns.
+
+## client.index [_index]
+Create or update a document in an index.
+
+Add a JSON document to the specified data stream or index and make it searchable.
+If the target is an index and the document already exists, the request updates the document and increments its version.
+
+NOTE: You cannot use this API to send update requests for existing documents in a data stream.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
+
+* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.
+* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.
+* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.
+
+Automatic data stream creation requires a matching index template with data stream enabled.
+
+NOTE: Replica shards might not all be started when an indexing operation returns successfully.
+By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.
+
+**Automatically create data streams and indices**
+
+If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
+
+If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.
+
+NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
+
+If no mapping exists, the index operation creates a dynamic mapping.
+By default, new fields and objects are automatically added to the mapping if needed.
+
+Automatic index creation is controlled by the `action.auto_create_index` setting.
+If it is `true`, any index can be created automatically.
+You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.
+Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.
+When a list is specified, the default behavior is to disallow.
+
+NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.
+It does not affect the creation of data streams.
+
+**Optimistic concurrency control**
+
+Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.
+If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.
+
+**Routing**
+
+By default, shard placement — or routing — is controlled by using a hash of the document's ID value.
+For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.
+
+When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.
+This does come at the (very minimal) cost of an additional document parsing pass.
+If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.
+
+NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
+
+**Distributed**
+
+The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
+After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.
+
+**Active shards**
+
+To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
+If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
+By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
+This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
+To alter this behavior per operation, use the `wait_for_active_shards` request parameter.
+
+Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
+Specifying a negative value or a number greater than the number of shard copies will throw an error.
+
+For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
+If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
+This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
+If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
+This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.
+The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.
+
+It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.
+After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.
+The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.
+
+**No operation (noop) updates**
+
+When updating a document by using this API, a new version of the document is always created even if the document hasn't changed.
+If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.
+The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.
+
+There isn't a definitive rule for when noop updates aren't acceptable.
+It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.
+
+**Versioning**
+
+Each indexed document is given a version number.
+By default, internal versioning is used that starts at 1 and increments with each update, deletes included.
+Optionally, the version number can be set to an external value (for example, if maintained in a database).
+To enable this functionality, `version_type` should be set to `external`.
+The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.
+
+NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.
+If no version is provided, the operation runs without any version checks.
+
+When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.
+If true, the document will be indexed and the new version number used.
+If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail.
For example:
+
+```
+PUT my-index-000001/_doc/1?version=2&version_type=external
+{
+  "user": {
+    "id": "elkbee"
+  }
+}
+```
+
+In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
+If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
+
+A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
+Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create)
+
+```ts
+client.index({ index })
+```
+### Arguments [_arguments_index]
+
+#### Request (object) [_request_index]
+
+- **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API.
+- **`id` (Optional, string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format and omit this parameter.
+- **`document` (Optional, object)**: A document.
+- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
+- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
+- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message in case of parsing errors.
+- **`op_type` (Optional, Enum("index" \| "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
+- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur.
+- **`version` (Optional, number)**: An explicit version number for concurrency control. It must be a non-negative long number.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created).
+
+## client.info [_info]
+Get cluster info.
+Get basic build, version, and cluster information.
+NOTE: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info)
+
+```ts
+client.info()
+```
+
+## client.knnSearch [_knn_search]
+Performs a kNN search.
+```ts
+client.knnSearch()
+```
+
+## client.mget [_mget]
+Get multiple documents.
+
+Get multiple JSON documents by ID from one or more indices.
+If you specify an index in the request URI, you only need to specify the document IDs in the request body.
+To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.
+
+**Filter source fields**
+
+By default, the `_source` field is returned for every document (if stored).
+Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.
+You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.
+
+**Get stored fields**
+
+Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve.
+Any requested fields that are not stored are ignored.
+You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget)
+
+```ts
+client.mget({ ... })
+```
+### Arguments [_arguments_mget]
+
+#### Request (object) [_request_mget]
+
+- **`index` (Optional, string)**: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.
+- **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: The documents you want to retrieve. Required if no index is specified in the request URI.
+- **`ids` (Optional, string \| string[])**: The IDs of the documents you want to retrieve.
Allowed when the index is specified in the request URI.
+- **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic `_source`? Use this to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
+- **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default.
+- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
+- **`refresh` (Optional, boolean)**: If `true`, the request refreshes relevant shards before retrieving documents.
+- **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
+- **`_source` (Optional, boolean \| string \| string[])**: True or false to return the `_source` field or not, or a list of fields to return.
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter.
+- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`stored_fields` (Optional, string \| string[])**: If `true`, retrieves the document fields stored in the index rather than the document `_source`.
+
+## client.msearch [_msearch]
+Run multiple searches.
+
+The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.
+The structure is as follows:
+
+```
+header\n
+body\n
+header\n
+body\n
+```
+
+This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.
+
+IMPORTANT: The final line of data must end with a newline character `\n`.
+Each newline character may be preceded by a carriage return `\r`.
+When sending requests to this endpoint, the `Content-Type` header should be set to `application/x-ndjson`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch)
+
+```ts
+client.msearch({ ... })
+```
+### Arguments [_arguments_msearch]
+
+#### Request (object) [_request_msearch]
+
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and index aliases to search.
+- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, project_routing, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])**
+- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response.
+- **`include_named_queries_score` (Optional, boolean)**: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false). This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead.
+- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`.
+- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node.
+- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method (for example, if date filters are mandatory to match but the shard bounds and the query are disjoint).
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for a search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded). Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
+- **`rest_total_hits_as_int` (Optional, boolean)**: If true, `hits.total` is returned as an integer in the response. Defaults to false, which returns an object.
+- **`routing` (Optional, string)**: Custom routing value used to route search operations to a specific shard.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents.
+- **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response.
+
+## client.msearchTemplate [_msearch_template]
+Run multiple templated searches.
+
+Run multiple templated searches with a single request.
+If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.
+
+For example:
+
+```
+$ cat requests
+{ "index": "my-index" }
+{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
+{ "index": "my-other-index" }
+{ "id": "my-other-search-template", "params": { "query_type": "match_all" }}
+
+$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
+```
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template)
+
+```ts
+client.msearchTemplate({ ... })
+```
+### Arguments [_arguments_msearch_template]
+
+#### Request (object) [_request_msearch_template]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`.
+- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, project_routing, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])**
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests.
+- **`max_concurrent_searches` (Optional, number)**: The maximum number of concurrent searches the API can run.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded). Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation.
+- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object.
+- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types.
+
+## client.mtermvectors [_mtermvectors]
+Get multiple term vectors.
+
+Get multiple term vectors with a single request.
+You can specify existing documents by index and ID or provide artificial documents in the body of the request.
+You can specify the index in the request body or request URI.
+The response contains a `docs` array with all the fetched termvectors.
+Each element has the structure provided by the termvectors API.
+
+**Artificial documents**
+
+You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.
+The mapping used is determined by the specified `_index`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors)
+
+```ts
+client.mtermvectors({ ... })
+```
+### Arguments [_arguments_mtermvectors]
+
+#### Request (object) [_request_mtermvectors]
+
+- **`index` (Optional, string)**: The name of the index that contains the documents.
+- **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: An array of existing or artificial documents.
+- **`ids` (Optional, string[])**: A simplified syntax to specify documents by their ID if they're in the same index.
+- **`fields` (Optional, string \| string[])**: A list of fields or wildcard expressions to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
+- **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.
+- **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets.
+- **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads.
+- **`positions` (Optional, boolean)**: If `true`, the response includes term positions.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`term_statistics` (Optional, boolean)**: If true, the response includes term frequency and document frequency.
+- **`version` (Optional, number)**: If `true`, returns the document version as part of a hit.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type.
+
+## client.openPointInTime [_open_point_in_time]
+Open a point in time.
+
+A search request by default runs against the most recent visible data of the target indices,
+which is called a point in time. An Elasticsearch PIT (point in time) is a lightweight view into the
+state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple
+search requests using the same point in time. For example, if refreshes happen between
+`search_after` requests, then the results of those requests might not be consistent as changes happening
+between searches are only visible to the more recent point in time.
+
+A point in time must be opened explicitly before being used in search requests.
+
+A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.
+
+Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits.
+If you want to retrieve more hits, use PIT with `search_after`.
+
+IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.
+
+When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception.
+To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.
+
+**Keeping point in time alive**
+
+The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time.
+The value does not need to be long enough to process all data — it just needs to be long enough for the next request.
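+
+As a rough sketch of that lifecycle with this client (the node URL, index name, and page size are placeholders; `closePointInTime` is the companion API for releasing the PIT):
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // placeholder node
+
+// Open a PIT, keeping it alive just long enough for the next request.
+const pit = await client.openPointInTime({
+  index: 'my-index-000001',
+  keep_alive: '1m'
+})
+
+// Search against the PIT. Note that no index is specified here, and each
+// response can return a refreshed PIT ID to use for the next request.
+const page = await client.search({
+  pit: { id: pit.id, keep_alive: '1m' },
+  sort: ['_shard_doc'],
+  size: 100
+})
+
+// Release the PIT once it is no longer needed.
+await client.closePointInTime({ id: pit.id })
+```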
+
+Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.
+Once the smaller segments are no longer needed they are deleted.
+However, open point-in-times prevent the old segments from being deleted since they are still in use.
+
+TIP: Keeping older segments alive means that more disk space and file handles are needed.
+Ensure that you have configured your nodes to have ample free file handles.
+
+Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.
+Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.
+Note that a point-in-time doesn't prevent its associated indices from being deleted.
+You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time)
+
+```ts
+client.openPointInTime({ index, keep_alive })
+```
+### Arguments [_arguments_open_point_in_time]
+
+#### Request (object) [_request_open_point_in_time]
+
+- **`index` (string \| string[])**: A list of index names to open point in time; use `_all` or an empty string to perform the operation on all indices.
+- **`keep_alive` (string \| -1 \| 0)**: Extend the length of time that the point in time persists.
+- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the PIT request using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded). Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`.
+- **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. +- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. + +## client.ping [_ping] +Ping the cluster. +Get information about whether the cluster is running. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster) + +```ts +client.ping() +``` + +## client.putScript [_put_script] +Create or update a script or search template. +Creates or updates a stored script or search template. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script) + +```ts +client.putScript({ id, script }) +``` +### Arguments [_arguments_put_script] + +#### Request (object) [_request_put_script] + +- **`id` (string)**: The identifier for the stored script or search template. It must be unique within the cluster. +- **`script` ({ lang, options, source })**: The script or search template, its parameters, and its language. +- **`context` (Optional, string)**: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. + +## client.rankEval [_rank_eval] +Evaluate ranked search results. + +Evaluate the quality of ranked search results over a set of typical search queries. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval) + +```ts +client.rankEval({ requests }) +``` +### Arguments [_arguments_rank_eval] + +#### Request (object) [_request_rank_eval] + +- **`requests` ({ id, request, ratings, template_id, params }[])**: A set of typical search requests, together with their provided ratings. +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. +- **`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })**: Definition of the evaluation metric to calculate. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. 
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expressions to concrete indices that are open, closed, or both.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response.
+- **`search_type` (Optional, string)**: Search operation type.
+
+## client.reindex [_reindex]
+Reindex documents.
+
+Copy documents from a source to a destination.
+You can copy all documents to the destination index or reindex a subset of the documents.
+The source can be any existing index, alias, or data stream.
+The destination must differ from the source.
+For example, you cannot reindex a data stream into itself.
+
+IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.
+The destination should be configured as desired before calling the reindex API.
+Reindex does not copy the settings from the source or its associated template.
+Mappings, shard counts, and replicas, for example, must be configured ahead of time.
+
+If the Elasticsearch security features are enabled, you must have the following security privileges:
+
+* The `read` index privilege for the source data stream, index, or alias.
+* The `write` index privilege for the destination data stream, index, or index alias.
+* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.
+* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.
+
+If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.
+Automatic data stream creation requires a matching index template with data stream enabled.
+
+The `dest` element can be configured like the index API to control optimistic concurrency control.
+Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.
+
+Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.
+
+Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination.
+All existing documents will cause a version conflict.
+
+IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.
+A reindex can only add new documents to a destination data stream.
+It cannot update existing documents in a destination data stream.
+
+By default, version conflicts abort the reindex process.
+To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.
+In this case, the response includes a count of the version conflicts that were encountered.
+Note that the handling of other error types is unaffected by the `conflicts` property.
+Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.
+
+It's recommended to reindex on indices with a green status. Reindexing can fail when a node shuts down or crashes.
+* When requested with `wait_for_completion=true` (default), the request fails if the node shuts down.
+* When requested with `wait_for_completion=false`, a task ID is returned, for use with the task management APIs. The task may disappear or fail if the node shuts down.
+When retrying a failed reindex operation, it might be necessary to set `conflicts=proceed` or to first delete the partial destination index.
+Additionally, dry runs, checking disk space, and fetching index recovery information can help address the root cause.
+
+Refer to the linked documentation for examples of how to reindex documents.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex)
+
+```ts
+client.reindex({ dest, source })
+```
+### Arguments [_arguments_reindex]
+
+#### Request (object) [_request_reindex]
+
+- **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination you are copying to.
+- **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source you are copying from.
+- **`conflicts` (Optional, Enum("abort" \| "proceed"))**: Indicates whether to continue reindexing even when there are conflicts.
+- **`max_docs` (Optional, number)**: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.
+- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when reindexing.
+- **`refresh` (Optional, boolean)**: If `true`, the request refreshes affected shards to make this operation visible to search.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. By default, there is no throttle.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period of time that a consistent view of the index should be maintained for scrolled search.
+- **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period each indexing operation waits for automatic index creation, dynamic mapping updates, and waiting for active shards.
By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+
+## client.reindexRethrottle [_reindex_rethrottle]
+Throttle a reindex operation.
+
+Change the number of requests per second for a particular reindex operation.
+For example:
+
+```
+POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+```
+
+Rethrottling that speeds up the query takes effect immediately.
+Rethrottling that slows down the query will take effect after completing the current batch.
+This behavior prevents scroll timeouts.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex)
+
+```ts
+client.reindexRethrottle({ task_id })
+```
+### Arguments [_arguments_reindex_rethrottle]
+
+#### Request (object) [_request_reindex_rethrottle]
+
+- **`task_id` (string)**: The task identifier, which can be found by using the tasks API.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level.
+
+## client.renderSearchTemplate [_render_search_template]
+Render a search template.
+
+Render a search template as a search request body.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template)
+
+```ts
+client.renderSearchTemplate({ ... })
+```
+### Arguments [_arguments_render_search_template]
+
+#### Request (object) [_request_render_search_template]
+
+- **`id` (Optional, string)**: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required.
+- **`file` (Optional, string)**
+- **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value.
+- **`source` (Optional, string \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `<templated-id>` is specified, this parameter is required.
+
+## client.scriptsPainlessExecute [_scripts_painless_execute]
+Run a script.
+
+Runs a script and returns a result.
+Use this API to build and test scripts, such as when defining a script for a runtime field.
+This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.
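+
+For instance, a minimal sketch (with placeholder values) that evaluates a script in the default `painless_test` context:
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({ node: 'http://localhost:9200' }) // placeholder node
+
+// Runs entirely on the cluster side; no index or documents are touched.
+const response = await client.scriptsPainlessExecute({
+  script: {
+    source: 'params.count / params.total',
+    params: { count: 100.0, total: 1000.0 }
+  }
+})
+console.log(response.result) // rendered as a string, e.g. "0.1"
+```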
+
+The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is.
+
+Each context requires a script, but additional parameters depend on the context you're using for that script.
+
+[Endpoint documentation](https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples)
+
+```ts
+client.scriptsPainlessExecute({ ... })
+```
+### Arguments [_arguments_scripts_painless_execute]
+
+#### Request (object) [_request_scripts_painless_execute]
+
+- **`context` (Optional, Enum("painless_test" \| "filter" \| "score" \| "boolean_field" \| "date_field" \| "double_field" \| "geo_point_field" \| "ip_field" \| "keyword_field" \| "long_field" \| "composite_field"))**: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed.
+- **`context_setup` (Optional, { document, index, query })**: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`.
+- **`script` (Optional, { source, id, params, lang, options })**: The Painless script to run.
+
+## client.scroll [_scroll]
+Run a scrolling search.
+
+IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).
+
+The scroll API gets large sets of results from a single scrolling search request.
+To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.
+The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.
+The search response returns a scroll ID in the `_scroll_id` response body parameter.
+You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.
+If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.
+
+You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.
+
+IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll)
+
+```ts
+client.scroll({ scroll_id })
+```
+### Arguments [_arguments_scroll]
+
+#### Request (object) [_request_scroll]
+
+- **`scroll_id` (string)**: The scroll ID of the search.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling.
+- **`rest_total_hits_as_int` (Optional, boolean)**: If true, the API response’s `hits.total` property is returned as an integer. If false, the API response’s `hits.total` property is returned as an object.
+
+## client.search [_search]
+Run a search.
+
+Get search hits that match the query defined in the request.
+You can provide search queries using the `q` query string parameter or the request body.
+If both are specified, only the query parameter is used.
+
+If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias.
For cross-cluster search, refer to the documentation about configuring CCS privileges.
+To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.
+
+**Search slicing**
+
+When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.
+By default, the splitting is done first on the shards, then locally on each shard.
+The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.
+
+For instance, if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.
+
+IMPORTANT: The same point-in-time ID should be used for all slices.
+If different PIT IDs are used, slices can overlap and miss documents.
+This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search)
+
+```ts
+client.search({ ... })
+```
+### Arguments [_arguments_search]
+
+#### Request (object) [_request_search]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request.
+- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**: Collapses search results by the values of the specified field.
+- **`explain` (Optional, boolean)**: If `true`, the request returns detailed information about score computation as part of a hit.
+- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins.
+- **`from` (Optional, number)**: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter.
+- **`highlight` (Optional, { encoder, fields })**: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results.
+- **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
+- **`indices_boost` (Optional, Record[])**: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score.
+- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response.
+- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, visit_percentage, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, visit_percentage, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run. +- **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use. +- **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations. +- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. +- **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank, script } \| { window_size, query, learning_to_rank, script }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. +- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule, rescorer, linear, pinned })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. +- **`size` (Optional, number)**: The number of hits to return, which must not be negative. 
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property.
+- **`slice` (Optional, { field, id, max })**: Split a scrolled search into multiple slices that can be consumed independently.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: A list of `<field>:<direction>` pairs.
+- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`.
+- **`fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response.
+- **`suggest` (Optional, { text })**: Defines a suggester that provides similar looking terms based on a provided text.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early.
+- **`timeout` (Optional, string)**: The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout.
+- **`track_scores` (Optional, boolean)**: If `true`, calculate and return document scores, even if the scores are not used for sorting.
+- **`version` (Optional, boolean)**: If `true`, the request returns the document version as part of a hit.
+- **`seq_no_primary_term` (Optional, boolean)**: If `true`, the request returns the sequence number and primary term of the last modification of each hit.
+- **`stored_fields` (Optional, string \| string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response.
+- **`pit` (Optional, { id, keep_alive })**: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
+- **`runtime_mappings` (Optional, Record)**: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
+- **`stats` (Optional, string[])**: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices.
For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`allow_partial_search_results` (Optional, boolean)**: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`batched_reduce_size` (Optional, number)**: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. +- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for the query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`. +- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices will be ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. +- **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. 
* `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified node IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:<node-id>,<node-id>` to run the search, if possible, on the specified node IDs. If not, select shards using the default method. * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order.
+- **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if, for instance, a shard cannot match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only indices. * The primary sort of the query targets an indexed field.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded). Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
+- **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings.
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates how distributed term frequencies are calculated for relevance scoring.
+- **`suggest_field` (Optional, string)**: The field to use for suggestions.
+- **`suggest_mode` (Optional, Enum("missing" \| "popular" \| "always"))**: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+- **`suggest_size` (Optional, number)**: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified.
+- **`typed_keys` (Optional, boolean)**: If `true`, aggregation and suggester names are prefixed by their respective types in the response.
+
+- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response.
+- **`_source_excludes` (Optional, string \| string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_exclude_vectors` (Optional, boolean)**: Whether vectors should be excluded from `_source`.
+- **`_source_includes` (Optional, string \| string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`q` (Optional, string)**: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned.
+- **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic `_source`? Use this to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this enabled will be slower than when synthetic source is enabled natively in the index.
+
+## client.searchMvt [_search_mvt]
+Search a vector tile.
+
+Search a vector tile for geospatial values.
+Before using this API, you should be familiar with the Mapbox vector tile specification.
+The API returns results as a binary Mapbox vector tile.
+
+Internally, Elasticsearch translates a vector tile search API request into a search containing:
+
+* A `geo_bounding_box` query on the `<field>`. The query uses the `<zoom>/<x>/<y>` tile as a bounding box.
+* A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box.
+* Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`.
+* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.
+
+Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:
+
+* A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query.
+* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.
+* A meta layer containing:
+  * A feature containing a bounding box. By default, this is the bounding box of the tile.
+  * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.
+  * Metadata for the search.
+
+The API only returns features that can be displayed at its zoom level.
+For example, if a polygon feature has no area at its zoom level, the API omits it.
+The API returns errors as UTF-8 encoded JSON.
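+
+For example, a tile request against a hypothetical `museums` index with a `location` geo field might look like this (a minimal sketch; the index name, field, coordinates, and returned fields are illustrative):
+
+```ts
+// Fetch the binary tile for zoom 13, x 4207, y 2692 (illustrative values).
+const tile = await client.searchMvt({
+  index: 'museums',
+  field: 'location',
+  zoom: 13,
+  x: 4207,
+  y: 2692,
+  grid_precision: 2,
+  fields: ['name', 'price']
+})
+// The response body is a binary Mapbox vector tile (PBF), not a JSON object.
+```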
+
+IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.
+If you specify both parameters, the query parameter takes precedence.
+
+**Grid precision for geotile**
+
+For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.
+`grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: `<zoom> + grid_precision`.
+For example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.
+The maximum final precision is 29.
+The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.
+For example, a value of 8 divides the tile into a grid of 256 x 256 cells.
+The `aggs` layer only contains features for cells with matching data.
+
+**Grid precision for geohex**
+
+For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`.
+
+This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.
+The following table maps the H3 resolution for each precision.
+For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6.
+At a precision of 6, hexagonal cells have an H3 resolution of 2.
+If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7.
+At a precision of 7, hexagonal cells have an H3 resolution of 3.
+
+| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |
+| --------- | ---------------- | ------------- | ----------------| ----- |
+| 1 | 4 | 0 | 122 | 30.5 |
+| 2 | 16 | 0 | 122 | 7.625 |
+| 3 | 64 | 1 | 842 | 13.15625 |
+| 4 | 256 | 1 | 842 | 3.2890625 |
+| 5 | 1024 | 2 | 5882 | 5.744140625 |
+| 6 | 4096 | 2 | 5882 | 1.436035156 |
+| 7 | 16384 | 3 | 41162 | 2.512329102 |
+| 8 | 65536 | 3 | 41162 | 0.6280822754 |
+| 9 | 262144 | 4 | 288122 | 1.099098206 |
+| 10 | 1048576 | 4 | 288122 | 0.2747745514 |
+| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |
+| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |
+| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |
+| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |
+| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |
+| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |
+| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |
+| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |
+| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |
+| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |
+| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |
+| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |
+| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |
+| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |
+| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |
+| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |
+| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |
+| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |
+| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |
+
+Hexagonal cells don't align perfectly on a vector tile.
+Some cells may intersect more than one vector tile.
+To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.
+Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.
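+
+To make the precision arithmetic above concrete, here is a small helper (plain TypeScript, not part of the client API) that applies the `<zoom> + grid_precision` rule and the `(2^grid_precision) x (2^grid_precision)` cell count described for `geotile` grids:
+
+```ts
+// Final precision and aggs-layer cell count for a geotile grid.
+function geotileGridStats (zoom: number, gridPrecision: number) {
+  const precision = zoom + gridPrecision   // for example, 7 + 8 = 15 (max 29)
+  const cellsPerSide = 2 ** gridPrecision  // for example, 2^8 = 256
+  return { precision, cells: cellsPerSide * cellsPerSide }
+}
+
+// geotileGridStats(7, 8) => { precision: 15, cells: 65536 }, a 256 x 256 grid.
+```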
+
+Learn how to use the vector tile search API with practical examples in the [Vector tile search examples](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search) guide.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt)
+
+```ts
+client.searchMvt({ index, field, zoom, x, y })
+```
+### Arguments [_arguments_search_mvt]
+
+#### Request (object) [_request_search_mvt]
+
+- **`index` (string \| string[])**: A list of data streams, indices, or aliases to search.
+- **`field` (string)**: The field containing the geospatial data to return.
+- **`zoom` (number)**: The zoom level for the vector tile to search.
+- **`x` (number)**: The X coordinate for the vector tile to search.
+- **`y` (number)**: The Y coordinate for the vector tile to search.
+- **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations.
+- **`buffer` (Optional, number)**: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile.
+- **`exact_bounds` (Optional, boolean)**: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile.
+- **`extent` (Optional, number)**: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides.
+- **`fields` (Optional, string \| string[])**: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results.
+- **`grid_agg` (Optional, Enum("geotile" \| "geohex"))**: The aggregation used to create a grid for the `field`.
+- **`grid_precision` (Optional, number)**: Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer.
+- **`grid_type` (Optional, Enum("grid" \| "point" \| "centroid"))**: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The query DSL used to filter documents for the search.
+- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name.
+- **`size` (Optional, number)**: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest.
+- **`track_total_hits` (Optional, boolean \| number)**: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query.
+- **`with_labels` (Optional, boolean)**: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded). Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
+
+## client.searchShards [_search_shards]
+Get the search shards.
+
+Get the indices and shards that a search request would be run against.
+This information can be useful for working out issues or planning optimizations with routing and shard preferences.
+When filtered aliases are used, the filter is returned as part of the `indices` section.
+
+If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards)
+
+```ts
+client.searchShards({ ...
})
+```
+### Arguments [_arguments_search_shards]
+
+#### Request (object) [_request_search_shards]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+
+## client.searchTemplate [_search_template]
+Run a search with a search template.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template)
+
+```ts
+client.searchTemplate({ ... })
+```
+### Arguments [_arguments_search_template]
+
+#### Request (object) [_request_search_template]
+
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`).
+- **`explain` (Optional, boolean)**: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter.
+- **`id` (Optional, string)**: The ID of the search template to use. If no `source` is specified, this parameter is required.
+- **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value.
+- **`profile` (Optional, boolean)**: If `true`, the query execution is profiled.
+- **`source` (Optional, string \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. It supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project metadata tags in a subset of Lucene query syntax. Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded). Examples: `_alias:my-project`, `_alias:_origin`, `_alias:*pr*`. Supported in serverless only.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`scroll` (Optional, string \| -1 \| 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation.
+- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object.
+- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types.
+
+## client.termsEnum [_terms_enum]
+Get terms in an index.
+
+Discover terms that match a partial string in an index.
+This API is designed for low-latency look-ups used in auto-complete scenarios.
+
+> info
+> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum)
+
+```ts
+client.termsEnum({ index, field })
+```
+### Arguments [_arguments_terms_enum]
+
+#### Request (object) [_request_terms_enum]
+
+- **`index` (string \| string[])**: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`.
+- **`field` (string)**: The field for which terms should be enumerated.
+- **`size` (Optional, number)**: The number of matching terms to return.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum length of time to spend collecting results.
If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty.
+- **`case_insensitive` (Optional, boolean)**: When `true`, the provided search string is matched against index terms without case sensitivity.
+- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter an index shard if the provided query rewrites to `match_none`.
+- **`string` (Optional, string)**: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766.
+- **`search_after` (Optional, string)**: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request.
+
+## client.termvectors [_termvectors]
+Get term vector information.
+
+Get information and statistics about terms in the fields of a particular document.
+
+You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.
+You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.
+For example:
+
+```
+GET /my-index-000001/_termvectors/1?fields=message
+```
+
+Fields can be specified using wildcards, similar to the multi match query.
+
+Term vectors are real-time by default, not near real-time.
+This can be changed by setting the `realtime` parameter to `false`.
+
+You can request three types of values: _term information_, _term statistics_, and _field statistics_.
+By default, all term information and field statistics are returned for all fields, but term statistics are excluded.
+
+**Term information**
+
+* term frequency in the field (always returned)
+* term positions (`positions: true`)
+* start and end offsets (`offsets: true`)
+* term payloads (`payloads: true`), as base64 encoded bytes
+
+If the requested information wasn't stored in the index, it will be computed on the fly if possible.
+Additionally, term vectors can be computed for documents that don't exist in the index but are provided by the user.
+
+> warn
+> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.
+
+**Behaviour**
+
+The term and field statistics are not accurate.
+Deleted documents are not taken into account.
+The information is only retrieved for the shard the requested document resides in.
+The term and field statistics are therefore only useful as relative measures, whereas the absolute numbers have no meaning in this context.
+By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.
+Use `routing` only to hit a particular shard.
+Refer to the linked documentation for detailed examples of how to use this API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors)
+
+```ts
+client.termvectors({ index })
+```
+### Arguments [_arguments_termvectors]
+
+#### Request (object) [_request_termvectors]
+
+- **`index` (string)**: The name of the index that contains the document.
+- **`id` (Optional, string)**: A unique identifier for the document.
+- **`doc` (Optional, object)**: An artificial document (a document not present in the index) for which you want to retrieve term vectors.
+- **`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })**: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query.
+- **`per_field_analyzer` (Optional, Record)**: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated.
+- **`fields` (Optional, string[])**: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
+- **`field_statistics` (Optional, boolean)**: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field).
+- **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets.
+- **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads.
+- **`positions` (Optional, boolean)**: If `true`, the response includes term positions.
+- **`term_statistics` (Optional, boolean)**: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact.
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
+- **`version` (Optional, number)**: If `true`, returns the document version as part of a hit.
+- **`version_type` (Optional, Enum("internal" \| "external" \| "external_gte"))**: The version type.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
+
+## client.update [_update]
+Update a document.
+
+Update a document by running a script or passing a partial document.
+
+If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias.
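+
+As a quick orientation, a partial-document update with the JavaScript client looks like the following sketch (the index, ID, and field are illustrative):
+
+```ts
+// Merge a partial document into an existing one, retrying on version conflicts.
+const result = await client.update({
+  index: 'my-index-000001',
+  id: '1',
+  doc: { views: 0 },
+  retry_on_conflict: 3
+})
+console.log(result.result) // 'updated', or 'noop' if nothing changed
+```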
+
+The script can update, delete, or skip modifying the document.
+The API also supports passing a partial document, which is merged into the existing document.
+To fully replace an existing document, use the index API.
+This operation:
+
+* Gets the document (collocated with the shard) from the index.
+* Runs the specified script.
+* Indexes the result.
+
+The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.
+
+The `_source` field must be enabled to use this API.
+In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).
+For usage examples such as partial updates, upserts, and scripted updates, see the external documentation.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update)
+
+```ts
+client.update({ id, index })
+```
+### Arguments [_arguments_update]
+
+#### Request (object) [_request_update]
+
+- **`id` (string)**: A unique identifier for the document to be updated.
+- **`index` (string)**: The name of the target index. By default, the index is created automatically if it doesn't exist.
+- **`detect_noop` (Optional, boolean)**: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document.
+- **`doc` (Optional, object)**: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored.
+- **`doc_as_upsert` (Optional, boolean)**: If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported.
+- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document.
+- **`scripted_upsert` (Optional, boolean)**: If `true`, run the script whether or not the document exists.
+- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve.
+- **`upsert` (Optional, object)**: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run.
+- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
+- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
+- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message if there is a parsing error.
+- **`lang` (Optional, string)**: The script language.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+- **`retry_on_conflict` (Optional, number)**: The number of times the operation should be retried when a conflict occurs.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for the following operations: dynamic mapping updates and waiting for active shards.
Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active.
+- **`_source_excludes` (Optional, string \| string[])**: The source fields you want to exclude.
+- **`_source_includes` (Optional, string \| string[])**: The source fields you want to retrieve.
+
+## client.updateByQuery [_update_by_query]
+Update documents.
+Updates documents that match the specified query.
+If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
+
+If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:
+
+* `read`
+* `index` or `write`
+
+You can specify the query criteria in the request URI or the request body using the same syntax as the search API.
+
+When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.
+When the versions match, the document is updated and the version number is incremented.
+If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.
+You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
+Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.
+
+NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.
+
+While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.
+A bulk update request is performed for each batch of matching documents.
+Any query or update failures cause the update by query request to fail and the failures are shown in the response.
+Any update requests that completed successfully still stick; they are not rolled back.
+
+**Refreshing shards**
+
+Specifying the `refresh` parameter refreshes all shards once the request completes.
+This is different from the update API's `refresh` parameter, which causes only the shard
+that received the request to be refreshed. Unlike the update API, it does not support
+`wait_for`.
+
+**Running update by query asynchronously**
+
+If the request contains `wait_for_completion=false`, Elasticsearch
+performs some preflight checks, launches the request, and returns a
+[task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) you can use to cancel or get the status of the task.
+Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.
+
+**Waiting for active shards**
+
+`wait_for_active_shards` controls how many copies of a shard must be active
+before proceeding with the request.
See [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards)
+for details. `timeout` controls how long each write request waits for unavailable
+shards to become available. Both work exactly the way they work in the
+[Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). Update by query uses scrolled searches, so you can also
+specify the `scroll` parameter to control how long it keeps the search context
+alive, for example `?scroll=10m`. The default is 5 minutes.
+
+**Throttling update requests**
+
+To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.
+This pads each batch with a wait time to throttle the rate.
+Set `requests_per_second` to `-1` to turn off throttling.
+
+Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
+The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
+By default the batch size is 1000, so if `requests_per_second` is set to `500`:
+
+```
+target_time = 1000 / 500 per second = 2 seconds
+wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+```
+
+Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
+This is "bursty" instead of "smooth".
+
+**Slicing**
+
+Update by query supports sliced scroll to parallelize the update process.
+This can improve efficiency and provide a convenient way to break the request down into smaller parts.
+
+Setting `slices` to `auto` chooses a reasonable number for most data streams and indices.
+This setting will use one slice per shard, up to a certain limit.
+If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+
+Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:
+
+* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
+* Fetching the status of the task for the request with `slices` only contains the status of completed slices.
+* These sub-requests are individually addressable for things like cancellation and rethrottling.
+* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.
+* Canceling the request with slices will cancel each sub-request.
+* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
+* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.
+* Each sub-request gets a slightly different snapshot of the source data stream or index, though these are all taken at approximately the same time.
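+
+For example, an automatically sliced update by query that counts version conflicts instead of aborting might look like this sketch (the index name and script are illustrative):
+
+```ts
+// Let Elasticsearch pick one slice per shard and proceed past version conflicts.
+const response = await client.updateByQuery({
+  index: 'my-index-000001',
+  slices: 'auto',
+  conflicts: 'proceed',
+  script: { source: 'ctx._source.counter += 1', lang: 'painless' }
+})
+console.log(response.updated, response.version_conflicts)
+```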
+
+If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:
+
+* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
+* Update performance scales linearly across available resources with the number of slices.
+
+Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
+Refer to the linked documentation for examples of how to update documents using the `_update_by_query` API:
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query)
+
+```ts
+client.updateByQuery({ index })
+```
+### Arguments [_arguments_update_by_query]
+
+#### Request (object) [_request_update_by_query]
+
+- **`index` (string \| string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.
+- **`max_docs` (Optional, number)**: The maximum number of documents to update.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to update using the Query DSL.
+- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when updating.
+- **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices.
+- **`conflicts` (Optional, Enum("abort" \| "proceed"))**: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
+- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
+- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`. This parameter can be used only when the `q` query string parameter is specified.
+- **`df` (Optional, string)**: The field to use as the default when no field prefix is given in the query string.
This parameter can be used only when the `q` query string parameter is specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`.
+- **`from` (Optional, number)**: Skips the specified number of documents.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
+- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured, it will always run, regardless of the value of this parameter.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default.
+- **`q` (Optional, string)**: A query in the Lucene query string syntax.
+- **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different from the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed.
+- **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. It defaults to the index-level setting.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`scroll` (Optional, string \| -1 \| 0)**: The period to retain the search context for scrolling.
+- **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation.
+- **`search_timeout` (Optional, string \| -1 \| 0)**: An explicit timeout for each search request. By default, there is no timeout.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
+- **`slices` (Optional, number \| Enum("auto"))**: The number of slices this task should be divided into.
+- **`sort` (Optional, string[])**: A list of `<field>:<direction>` pairs.
+- **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
+- **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit.
+- **`version_type` (Optional, boolean)**: Whether the document should increment its version number on hit (internal versioning) or not (as in reindex).
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.
+
+## client.updateByQueryRethrottle [_update_by_query_rethrottle]
+Throttle an update by query operation.
+
+Change the number of requests per second for a particular update by query operation.
+Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle)
+
+```ts
+client.updateByQueryRethrottle({ task_id })
+```
+### Arguments [_arguments_update_by_query_rethrottle]
+
+#### Request (object) [_request_update_by_query_rethrottle]
+
+- **`task_id` (string)**: The ID for the task.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`.
+
+## client.asyncSearch.delete [_async_search.delete]
+Delete an async search.
+
+If the asynchronous search is still running, it is cancelled.
+Otherwise, the saved search results are deleted.
+If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to:
+
+* The authenticated user that submitted the original search request.
+* Users that have the `cancel_task` cluster privilege.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit)
+
+```ts
+client.asyncSearch.delete({ id })
+```
+
+### Arguments [_arguments_async_search.delete]
+
+#### Request (object) [_request_async_search.delete]
+- **`id` (string)**: A unique identifier for the async search.
+
+## client.asyncSearch.get [_async_search.get]
+Get async search results.
+
+Retrieve the results of a previously submitted asynchronous search request.
+If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.
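+
+For example, a common flow pairs this API with `client.asyncSearch.submit` (described below): submit a search, then poll for its results by ID. This is a sketch; the index name, query, and timeout values are illustrative assumptions:
+
+```ts
+// Submit a search that is kept around if it outlives the two-second wait
+const submitted = await client.asyncSearch.submit({
+  index: 'my-index',                  // illustrative index name
+  query: { match_all: {} },           // illustrative query
+  wait_for_completion_timeout: '2s',
+  keep_on_completion: true
+})
+
+// If the search is still running, poll for its results by ID
+if (submitted.is_running && submitted.id) {
+  const results = await client.asyncSearch.get({
+    id: submitted.id,
+    wait_for_completion_timeout: '5s'
+  })
+}
+```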
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit)
+
+```ts
+client.asyncSearch.get({ id })
+```
+
+### Arguments [_arguments_async_search.get]
+
+#### Request (object) [_request_async_search.get]
+- **`id` (string)**: A unique identifier for the async search.
+- **`keep_alive` (Optional, string \| -1 \| 0)**: The length of time that the async search should be available in the cluster.
+When not specified, the `keep_alive` set with the corresponding submit async request will be used.
+Otherwise, it is possible to override the value and extend the validity of the request.
+When this period expires, the search, if still running, is cancelled.
+If the search is completed, its saved results are deleted.
+- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response.
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: Specifies how long to wait for the search to complete, up to the provided timeout.
+Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires.
+By default no timeout is set, meaning that the currently available results will be returned without any additional wait.
+
+## client.asyncSearch.status [_async_search.status]
+Get the async search status.
+
+Get the status of a previously submitted async search request given its identifier, without retrieving search results.
+If the Elasticsearch security features are enabled, access to the status of a specific async search is restricted to:
+
+* The user or API key that submitted the original async search request.
+* Users that have the `monitor` cluster privilege or greater privileges.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit)
+
+```ts
+client.asyncSearch.status({ id })
+```
+
+### Arguments [_arguments_async_search.status]
+
+#### Request (object) [_request_async_search.status]
+- **`id` (string)**: A unique identifier for the async search.
+- **`keep_alive` (Optional, string \| -1 \| 0)**: The length of time that the async search needs to be available.
+Ongoing async searches and any saved search results are deleted after this period.
+
+## client.asyncSearch.submit [_async_search.submit]
+Run an async search.
+
+When the primary sort of the results is an indexed field, shards get sorted based on the minimum and maximum values that they hold for that field. Partial results become available following the sort criteria that was requested.
+
+Warning: Asynchronous search does not support scroll or search requests that include only the suggest section.
+
+By default, Elasticsearch does not allow you to store an async search response larger than 10MB, and an attempt to do this results in an error.
+The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit)
+
+```ts
+client.asyncSearch.submit({ ...
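+  // Illustrative request body (an assumption for this sketch, not part of the
+  // generated stub; any of the parameters documented below may be supplied):
+  //   index: 'my-index',
+  //   query: { match_all: {} },
+  //   keep_on_completion: true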
}) +``` + +### Arguments [_arguments_async_search.submit] + +#### Request (object) [_request_async_search.submit] +- **`index` (Optional, string \| string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices +- **`aggregations` (Optional, Record)** +- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** +- **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. +- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. +- **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +- **`highlight` (Optional, { encoder, fields })** +- **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +- **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, visit_percentage, boost, filter, similarity, inner_hits, rescore_vector } \| { field, query_vector, query_vector_builder, k, num_candidates, visit_percentage, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. +- **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are +not included in search results and results collected by aggregations. 
+- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** +- **`profile` (Optional, boolean)** +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank, script } \| { window_size, query, learning_to_rank, script }[])** +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])** +- **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +- **`slice` (Optional, { field, id, max })** +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +- **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +- **`suggest` (Optional, { text })** +- **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +- **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +- **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. +- **`version` (Optional, boolean)**: If true, returns document version as part of a hit. 
+- **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification
+of each hit. See Optimistic concurrency control.
+- **`stored_fields` (Optional, string \| string[])**: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+- **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an `<index>` in the request path.
+- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+- **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: Blocks and waits until the search is completed up to a certain timeout.
+When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster.
+- **`keep_alive` (Optional, string \| -1 \| 0)**: Specifies how long the async search needs to be available.
+Ongoing async searches and any saved search results are deleted after this period.
+- **`keep_on_completion` (Optional, boolean)**: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`.
+- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes the `_all` string or when no indices have been specified.)
+- **`allow_partial_search_results` (Optional, boolean)**: Indicates whether an error should be returned if there is a partial search failure or timeout.
+- **`analyzer` (Optional, string)**: The analyzer to use for the query string.
+- **`analyze_wildcard` (Optional, boolean)**: Specify whether wildcard and prefix queries should be analyzed (default: false).
+- **`batched_reduce_size` (Optional, number)**: Affects how often partial results become available, which happens whenever shard results are reduced.
+A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default).
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: The default value is the only supported value.
+- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query (AND or OR).
+- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expressions to concrete indices that are open, closed, or both.
+- **`ignore_throttled` (Optional, boolean)**: Whether specified concrete, expanded or aliased indices should be ignored when throttled.
+- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed).
+- **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored.
+- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster by limiting the number of concurrent shard requests.
+- **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random).
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project
+metadata tags in a subset of Lucene query syntax.
+Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded).
+Examples:
+ _alias:my-project
+ _alias:_origin
+ _alias:*pr*
+Supported in serverless only.
+- **`request_cache` (Optional, boolean)**: Specify if the request cache should be used for this request or not (defaults to true).
+- **`routing` (Optional, string)**: A list of specific routing values.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Search operation type.
+- **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions.
+- **`suggest_mode` (Optional, Enum("missing" \| "popular" \| "always"))**: Specify suggest mode.
+- **`suggest_size` (Optional, number)**: How many suggestions to return in response.
+- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned.
+- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response.
+- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response.
+- **`_source_excludes` (Optional, string \| string[])**: A list of fields to exclude from the returned _source field.
+- **`_source_includes` (Optional, string \| string[])**: A list of fields to extract and return from the _source field.
+- **`q` (Optional, string)**: Query in the Lucene query string syntax.
+
+## client.cat.aliases [_cat.aliases]
+Get aliases.
+
+Get the cluster's index aliases, including filter and routing information.
+This API does not return data stream aliases.
+
+IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases)
+
+```ts
+client.cat.aliases({ ... })
+```
+
+### Arguments [_arguments_cat.aliases]
+
+#### Request (object) [_request_cat.aliases]
+- **`name` (Optional, string \| string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`.
+- **`h` (Optional, Enum("alias" \| "index" \| "filter" \| "routing.index" \| "routing.search" \| "is_write_index") \| Enum("alias" \| "index" \| "filter" \| "routing.index" \| "routing.search" \| "is_write_index")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+It supports a list of values, such as `open,hidden`.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, you can set it to `-1`.
+
+## client.cat.allocation [_cat.allocation]
+Get shard allocation information.
+
+Get a snapshot of the number of shards allocated to each data node and their disk space.
+
+IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation)
+
+```ts
+client.cat.allocation({ ... })
+```
+
+### Arguments [_arguments_cat.allocation]
+
+#### Request (object) [_request_cat.allocation]
+- **`node_id` (Optional, string \| string[])**: A list of node identifiers or names used to limit the returned information.
+- **`h` (Optional, Enum("shards" \| "shards.undesired" \| "write_load.forecast" \| "disk.indices.forecast" \| "disk.indices" \| "disk.used" \| "disk.avail" \| "disk.total" \| "disk.percent" \| "host" \| "ip" \| "node" \| "node.role") \| Enum("shards" \| "shards.undesired" \| "write_load.forecast" \| "disk.indices.forecast" \| "disk.indices" \| "disk.used" \| "disk.avail" \| "disk.total" \| "disk.percent" \| "host" \| "ip" \| "node" \| "node.role")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cat.circuitBreaker [_cat.circuit_breaker]
+Get circuit breaker statistics.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch#TODO)
+
+```ts
+client.cat.circuitBreaker()
+```
+
+
+## client.cat.componentTemplates [_cat.component_templates]
+Get component templates.
+
+Get information about component templates in a cluster.
+Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+
+IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
+They are not intended for use by applications. For application consumption, use the get component template API.
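+
+For example, a quick way to list matching templates with a few columns, sorted by name (a sketch; the name pattern and column selection are illustrative):
+
+```ts
+const templates = await client.cat.componentTemplates({
+  name: 'logs-*',            // wildcard pattern (illustrative)
+  h: ['name', 'version'],    // columns to display
+  s: 'name'                  // sort ascending by name
+})
+```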
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates)
+
+```ts
+client.cat.componentTemplates({ ... })
+```
+
+### Arguments [_arguments_cat.component_templates]
+
+#### Request (object) [_request_cat.component_templates]
+- **`name` (Optional, string)**: The name of the component template.
+It accepts wildcard expressions.
+If it is omitted, all component templates are returned.
+- **`h` (Optional, Enum("name" \| "version" \| "alias_count" \| "mapping_count" \| "settings_count" \| "metadata_count" \| "included_in") \| Enum("name" \| "version" \| "alias_count" \| "mapping_count" \| "settings_count" \| "metadata_count" \| "included_in")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+
+## client.cat.count [_cat.count]
+Get a document count.
+
+Get quick access to a document count for a data stream, an index, or an entire cluster.
+The document count only includes live documents, not deleted documents which have not yet been removed by the merge process.
+
+IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
+They are not intended for use by applications. For application consumption, use the count API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count)
+
+```ts
+client.cat.count({ ... })
+```
+
+### Arguments [_arguments_cat.count]
+
+#### Request (object) [_request_cat.count]
+- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request.
+It supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`h` (Optional, Enum("epoch" \| "timestamp" \| "count") \| Enum("epoch" \| "timestamp" \| "count")[])**: A list of column names to display. It supports simple wildcards.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project
+metadata tags in a subset of Lucene query syntax.
+Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded).
+Examples:
+ _alias:my-project
+ _alias:_origin
+ _alias:*pr*
+Supported in serverless only.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+
+## client.cat.fielddata [_cat.fielddata]
+Get field data cache information.
+
+Get the amount of heap memory currently used by the field data cache on every data node in the cluster.
+
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.
+They are not intended for use by applications. For application consumption, use the nodes stats API.
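+
+For example, to see which fields hold the most field data memory on each node (a sketch; the field name is illustrative):
+
+```ts
+const fielddata = await client.cat.fielddata({
+  fields: ['my_field'],          // limit output to specific fields (illustrative)
+  h: ['node', 'field', 'size'],  // columns to display
+  s: 'size:desc'                 // largest consumers first
+})
+```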
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata)
+
+```ts
+client.cat.fielddata({ ... })
+```
+
+### Arguments [_arguments_cat.fielddata]
+
+#### Request (object) [_request_cat.fielddata]
+- **`fields` (Optional, string \| string[])**: List of fields used to limit returned information.
+To retrieve all fields, omit this parameter.
+- **`h` (Optional, Enum("id" \| "host" \| "ip" \| "node" \| "field" \| "size") \| Enum("id" \| "host" \| "ip" \| "node" \| "field" \| "size")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+
+## client.cat.health [_cat.health]
+Get the cluster health status.
+
+IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.
+They are not intended for use by applications. For application consumption, use the cluster health API.
+This API is often used to check malfunctioning clusters.
+To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:
+`HH:MM:SS`, which is human-readable but includes no date information;
+`Unix epoch time`, which is machine-sortable and includes date information.
+The latter format is useful for cluster recoveries that take multiple days.
+You can use the cat health API to verify cluster health across multiple nodes.
+You can also use the API to track the recovery of a large cluster over a longer period of time.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health)
+
+```ts
+client.cat.health({ ... })
+```
+
+### Arguments [_arguments_cat.health]
+
+#### Request (object) [_request_cat.health]
+- **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps.
+- **`h` (Optional, Enum("epoch" \| "timestamp" \| "cluster" \| "status" \| "node.total" \| "node.data" \| "shards" \| "pri" \| "relo" \| "init" \| "unassign" \| "unassign.pri" \| "pending_tasks" \| "max_task_wait_time" \| "active_shards_percent") \| Enum("epoch" \| "timestamp" \| "cluster" \| "status" \| "node.total" \| "node.data" \| "shards" \| "pri" \| "relo" \| "init" \| "unassign" \| "unassign.pri" \| "pending_tasks" \| "max_task_wait_time" \| "active_shards_percent")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+
+## client.cat.help [_cat.help]
+Get CAT help.
+
+Get help for the CAT APIs.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat)
+
+```ts
+client.cat.help()
+```
+
+
+## client.cat.indices [_cat.indices]
+Get index information.
+
+Get high-level information about indices in a cluster, including backing indices for data streams.
+
+Use this request to get the following information for each index in a cluster:
+- shard count
+- document count
+- deleted document count
+- primary store size
+- total store size of all shards, including shard replicas
+
+These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search.
As a result, all document counts include hidden nested documents. +To get an accurate count of Elasticsearch documents, use the cat count or count APIs. + +CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use an index endpoint. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices) + +```ts +client.cat.indices({ ... }) +``` + +### Arguments [_arguments_cat.indices] + +#### Request (object) [_request_cat.indices] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. +- **`health` (Optional, Enum("green" \| "yellow" \| "red" \| "unknown" \| "unavailable"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. +- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. +- **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`h` (Optional, Enum("health" \| "status" \| "index" \| "uuid" \| "pri" \| "rep" \| "docs.count" \| "docs.deleted" \| "creation.date" \| "creation.date.string" \| "store.size" \| "pri.store.size" \| "dataset.size" \| "completion.size" \| "pri.completion.size" \| "fielddata.memory_size" \| "pri.fielddata.memory_size" \| "fielddata.evictions" \| "pri.fielddata.evictions" \| "query_cache.memory_size" \| "pri.query_cache.memory_size" \| "query_cache.evictions" \| "pri.query_cache.evictions" \| "request_cache.memory_size" \| "pri.request_cache.memory_size" \| "request_cache.evictions" \| "pri.request_cache.evictions" \| "request_cache.hit_count" \| "pri.request_cache.hit_count" \| "request_cache.miss_count" \| "pri.request_cache.miss_count" \| "flush.total" \| "pri.flush.total" \| "flush.total_time" \| "pri.flush.total_time" \| "get.current" \| "pri.get.current" \| "get.time" \| "pri.get.time" \| "get.total" \| "pri.get.total" \| "get.exists_time" \| "pri.get.exists_time" \| "get.exists_total" \| "pri.get.exists_total" \| "get.missing_time" \| "pri.get.missing_time" \| "get.missing_total" \| "pri.get.missing_total" \| "indexing.delete_current" \| "pri.indexing.delete_current" \| "indexing.delete_time" \| "pri.indexing.delete_time" \| "indexing.delete_total" \| "pri.indexing.delete_total" \| "indexing.index_current" \| "pri.indexing.index_current" \| "indexing.index_time" \| "pri.indexing.index_time" \| "indexing.index_total" \| "pri.indexing.index_total" \| "indexing.index_failed" \| "pri.indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "pri.indexing.index_failed_due_to_version_conflict" \| "merges.current" \| "pri.merges.current" \| "merges.current_docs" \| "pri.merges.current_docs" \| "merges.current_size" \| "pri.merges.current_size" \| "merges.total" \| "pri.merges.total" \| "merges.total_docs" \| "pri.merges.total_docs" \| "merges.total_size" \| "pri.merges.total_size" \| "merges.total_time" \| 
"pri.merges.total_time" \| "refresh.total" \| "pri.refresh.total" \| "refresh.time" \| "pri.refresh.time" \| "refresh.external_total" \| "pri.refresh.external_total" \| "refresh.external_time" \| "pri.refresh.external_time" \| "refresh.listeners" \| "pri.refresh.listeners" \| "search.fetch_current" \| "pri.search.fetch_current" \| "search.fetch_time" \| "pri.search.fetch_time" \| "search.fetch_total" \| "pri.search.fetch_total" \| "search.open_contexts" \| "pri.search.open_contexts" \| "search.query_current" \| "pri.search.query_current" \| "search.query_time" \| "pri.search.query_time" \| "search.query_total" \| "pri.search.query_total" \| "search.scroll_current" \| "pri.search.scroll_current" \| "search.scroll_time" \| "pri.search.scroll_time" \| "search.scroll_total" \| "pri.search.scroll_total" \| "segments.count" \| "pri.segments.count" \| "segments.memory" \| "pri.segments.memory" \| "segments.index_writer_memory" \| "pri.segments.index_writer_memory" \| "segments.version_map_memory" \| "pri.segments.version_map_memory" \| "segments.fixed_bitset_memory" \| "pri.segments.fixed_bitset_memory" \| "warmer.current" \| "pri.warmer.current" \| "warmer.total" \| "pri.warmer.total" \| "warmer.total_time" \| "pri.warmer.total_time" \| "suggest.current" \| "pri.suggest.current" \| "suggest.time" \| "pri.suggest.time" \| "suggest.total" \| "pri.suggest.total" \| "memory.total" \| "pri.memory.total" \| "bulk.total_operations" \| "pri.bulk.total_operations" \| "bulk.total_time" \| "pri.bulk.total_time" \| "bulk.total_size_in_bytes" \| "pri.bulk.total_size_in_bytes" \| "bulk.avg_time" \| "pri.bulk.avg_time" \| "bulk.avg_size_in_bytes" \| "pri.bulk.avg_size_in_bytes" \| "dense_vector.value_count" \| "pri.dense_vector.value_count" \| "sparse_vector.value_count" \| "pri.sparse_vector.value_count") \| Enum("health" \| "status" \| "index" \| "uuid" \| "pri" \| "rep" \| "docs.count" \| "docs.deleted" \| "creation.date" \| "creation.date.string" \| "store.size" \| "pri.store.size" \| "dataset.size" \| "completion.size" \| "pri.completion.size" \| "fielddata.memory_size" \| "pri.fielddata.memory_size" \| "fielddata.evictions" \| "pri.fielddata.evictions" \| "query_cache.memory_size" \| "pri.query_cache.memory_size" \| "query_cache.evictions" \| "pri.query_cache.evictions" \| "request_cache.memory_size" \| "pri.request_cache.memory_size" \| "request_cache.evictions" \| "pri.request_cache.evictions" \| "request_cache.hit_count" \| "pri.request_cache.hit_count" \| "request_cache.miss_count" \| "pri.request_cache.miss_count" \| "flush.total" \| "pri.flush.total" \| "flush.total_time" \| "pri.flush.total_time" \| "get.current" \| "pri.get.current" \| "get.time" \| "pri.get.time" \| "get.total" \| "pri.get.total" \| "get.exists_time" \| "pri.get.exists_time" \| "get.exists_total" \| "pri.get.exists_total" \| "get.missing_time" \| "pri.get.missing_time" \| "get.missing_total" \| "pri.get.missing_total" \| "indexing.delete_current" \| "pri.indexing.delete_current" \| "indexing.delete_time" \| "pri.indexing.delete_time" \| "indexing.delete_total" \| "pri.indexing.delete_total" \| "indexing.index_current" \| "pri.indexing.index_current" \| "indexing.index_time" \| "pri.indexing.index_time" \| "indexing.index_total" \| "pri.indexing.index_total" \| "indexing.index_failed" \| "pri.indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "pri.indexing.index_failed_due_to_version_conflict" \| "merges.current" \| "pri.merges.current" \| "merges.current_docs" \| "pri.merges.current_docs" \| 
"merges.current_size" \| "pri.merges.current_size" \| "merges.total" \| "pri.merges.total" \| "merges.total_docs" \| "pri.merges.total_docs" \| "merges.total_size" \| "pri.merges.total_size" \| "merges.total_time" \| "pri.merges.total_time" \| "refresh.total" \| "pri.refresh.total" \| "refresh.time" \| "pri.refresh.time" \| "refresh.external_total" \| "pri.refresh.external_total" \| "refresh.external_time" \| "pri.refresh.external_time" \| "refresh.listeners" \| "pri.refresh.listeners" \| "search.fetch_current" \| "pri.search.fetch_current" \| "search.fetch_time" \| "pri.search.fetch_time" \| "search.fetch_total" \| "pri.search.fetch_total" \| "search.open_contexts" \| "pri.search.open_contexts" \| "search.query_current" \| "pri.search.query_current" \| "search.query_time" \| "pri.search.query_time" \| "search.query_total" \| "pri.search.query_total" \| "search.scroll_current" \| "pri.search.scroll_current" \| "search.scroll_time" \| "pri.search.scroll_time" \| "search.scroll_total" \| "pri.search.scroll_total" \| "segments.count" \| "pri.segments.count" \| "segments.memory" \| "pri.segments.memory" \| "segments.index_writer_memory" \| "pri.segments.index_writer_memory" \| "segments.version_map_memory" \| "pri.segments.version_map_memory" \| "segments.fixed_bitset_memory" \| "pri.segments.fixed_bitset_memory" \| "warmer.current" \| "pri.warmer.current" \| "warmer.total" \| "pri.warmer.total" \| "warmer.total_time" \| "pri.warmer.total_time" \| "suggest.current" \| "pri.suggest.current" \| "suggest.time" \| "pri.suggest.time" \| "suggest.total" \| "pri.suggest.total" \| "memory.total" \| "pri.memory.total" \| "bulk.total_operations" \| "pri.bulk.total_operations" \| "bulk.total_time" \| "pri.bulk.total_time" \| "bulk.total_size_in_bytes" \| "pri.bulk.total_size_in_bytes" \| "bulk.avg_time" \| "pri.bulk.avg_time" \| "bulk.avg_size_in_bytes" \| "pri.bulk.avg_size_in_bytes" \| "dense_vector.value_count" \| "pri.dense_vector.value_count" \| "sparse_vector.value_count" \| "pri.sparse_vector.value_count")[])**: A list of columns names to display. It supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +## client.cat.master [_cat.master] +Get master node information. + +Get information about the master node, including the ID, bound IP address, and name. + +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master) + +```ts +client.cat.master({ ... }) +``` + +### Arguments [_arguments_cat.master] + +#### Request (object) [_request_cat.master] +- **`h` (Optional, Enum("id" \| "host" \| "ip" \| "node") \| Enum("id" \| "host" \| "ip" \| "node")[])**: A list of columns names to display. It supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. 
In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.cat.mlDataFrameAnalytics [_cat.ml_data_frame_analytics] +Get data frame analytics jobs. + +Get configuration and usage information about data frame analytics jobs. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get data frame analytics jobs statistics API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics) + +```ts +client.cat.mlDataFrameAnalytics({ ... }) +``` + +### Arguments [_arguments_cat.ml_data_frame_analytics] + +#### Request (object) [_request_cat.ml_data_frame_analytics] +- **`id` (Optional, string)**: The ID of the data frame analytics to fetch +- **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. +(This includes `_all` string or when no configs have been specified.) +- **`h` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names to display. +- **`s` (Optional, Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version") \| Enum("assignment_explanation" \| "create_time" \| "description" \| "dest_index" \| "failure_reason" \| "id" \| "model_memory_limit" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "progress" \| "source_index" \| "state" \| "type" \| "version")[])**: List of column names or column aliases used to sort the +response. + +## client.cat.mlDatafeeds [_cat.ml_datafeeds] +Get datafeeds. + +Get configuration and usage information about datafeeds. +This API returns a maximum of 10,000 datafeeds. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` +cluster privileges to use this API. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get datafeed statistics API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds) + +```ts +client.cat.mlDatafeeds({ ... }) +``` + +### Arguments [_arguments_cat.ml_datafeeds] + +#### Request (object) [_request_cat.ml_datafeeds] +- **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +* Contains wildcard expressions and there are no datafeeds that match. 
+* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when +there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only +partial matches. +- **`h` (Optional, Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s") \| Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s")[])**: List of column names to display. +- **`s` (Optional, Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s") \| Enum("ae" \| "bc" \| "id" \| "na" \| "ne" \| "ni" \| "nn" \| "sba" \| "sc" \| "seah" \| "st" \| "s")[])**: List of column names or column aliases used to sort the response. + +## client.cat.mlJobs [_cat.ml_jobs] +Get anomaly detection jobs. + +Get configuration and usage information for anomaly detection jobs. +This API returns a maximum of 10,000 jobs. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get anomaly detection job statistics API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs) + +```ts +client.cat.mlJobs({ ... }) +``` + +### Arguments [_arguments_cat.ml_jobs] + +#### Request (object) [_request_cat.ml_jobs] +- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +* Contains wildcard expressions and there are no jobs that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there +are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial +matches. 
+- **`h` (Optional, Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state") \| Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state")[])**: List of column names to display. 
+- **`s` (Optional, Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state") \| Enum("assignment_explanation" \| "buckets.count" \| "buckets.time.exp_avg" \| "buckets.time.exp_avg_hour" \| "buckets.time.max" \| "buckets.time.min" \| "buckets.time.total" \| "data.buckets" \| "data.earliest_record" \| "data.empty_buckets" \| "data.input_bytes" \| "data.input_fields" \| "data.input_records" \| "data.invalid_dates" \| "data.last" \| "data.last_empty_bucket" \| "data.last_sparse_bucket" \| "data.latest_record" \| "data.missing_fields" \| "data.out_of_order_timestamps" \| "data.processed_fields" \| "data.processed_records" \| "data.sparse_buckets" \| "forecasts.memory.avg" \| "forecasts.memory.max" \| "forecasts.memory.min" \| "forecasts.memory.total" \| "forecasts.records.avg" \| "forecasts.records.max" \| "forecasts.records.min" \| "forecasts.records.total" \| "forecasts.time.avg" \| "forecasts.time.max" \| "forecasts.time.min" \| "forecasts.time.total" \| "forecasts.total" \| "id" \| "model.bucket_allocation_failures" \| "model.by_fields" \| "model.bytes" \| "model.bytes_exceeded" \| "model.categorization_status" \| "model.categorized_doc_count" \| "model.dead_category_count" \| "model.failed_category_count" \| "model.frequent_category_count" \| "model.log_time" \| "model.memory_limit" \| "model.memory_status" \| "model.over_fields" \| "model.partition_fields" \| "model.rare_category_count" \| "model.timestamp" \| "model.total_category_count" \| "node.address" \| "node.ephemeral_id" \| "node.id" \| "node.name" \| "opened_time" \| "state")[])**: List of column names or column aliases used to sort the response. + +## client.cat.mlTrainedModels [_cat.ml_trained_models] +Get trained models. + +Get configuration and usage information about inference trained models. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get trained models statistics API. 
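+
+For example, to page through trained models sorted by ID (a sketch; the column selection is illustrative):
+
+```ts
+const models = await client.cat.mlTrainedModels({
+  h: ['id', 'heap_size', 'operations', 'version'],  // columns to display
+  s: 'id',     // sort by model ID
+  from: 0,     // number of models to skip
+  size: 100    // maximum number of models to return
+})
+```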
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models)
+
+```ts
+client.cat.mlTrainedModels({ ... })
+```
+
+### Arguments [_arguments_cat.ml_trained_models]
+
+#### Request (object) [_request_cat.ml_trained_models]
+- **`model_id` (Optional, string)**: A unique identifier for the trained model.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.
+If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches.
+If `false`, the API returns a 404 status code when there are no matches or only partial matches.
+- **`h` (Optional, Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version") \| Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version")[])**: A list of column names to display.
+- **`s` (Optional, Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version") \| Enum("create_time" \| "created_by" \| "data_frame_analytics_id" \| "description" \| "heap_size" \| "id" \| "ingest.count" \| "ingest.current" \| "ingest.failed" \| "ingest.pipelines" \| "ingest.time" \| "license" \| "operations" \| "version")[])**: A list of column names or aliases used to sort the response.
+- **`from` (Optional, number)**: Skips the specified number of trained models.
+- **`size` (Optional, number)**: The maximum number of trained models to display.
+
+## client.cat.nodeattrs [_cat.nodeattrs]
+Get node attribute information.
+
+Get information about custom node attributes.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs)
+
+```ts
+client.cat.nodeattrs({ ... })
+```
+
+### Arguments [_arguments_cat.nodeattrs]
+
+#### Request (object) [_request_cat.nodeattrs]
+- **`h` (Optional, Enum("node" \| "id" \| "pid" \| "host" \| "ip" \| "port" \| "attr" \| "value") \| Enum("node" \| "id" \| "pid" \| "host" \| "ip" \| "port" \| "attr" \| "value")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.cat.nodes [_cat.nodes] +Get node information. + +Get information about the nodes in a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes) + +```ts +client.cat.nodes({ ... }) +``` + +### Arguments [_arguments_cat.nodes] + +#### Request (object) [_request_cat.nodes] +- **`full_id` (Optional, boolean)**: If `true`, return the full node ID. If `false`, return the shortened node ID. +- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. +- **`h` (Optional, Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "available_processors" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version") \| Enum("build" \| "completion.size" \| "cpu" \| "disk.avail" \| "disk.total" \| "disk.used" \| "disk.used_percent" \| "fielddata.evictions" \| "fielddata.memory_size" \| "file_desc.current" \| "file_desc.max" \| "file_desc.percent" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "heap.current" \| "heap.max" \| "heap.percent" \| "http_address" \| "id" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed" \| 
"indexing.index_failed_due_to_version_conflict" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "jdk" \| "load_1m" \| "load_5m" \| "load_15m" \| "available_processors" \| "mappings.total_count" \| "mappings.total_estimated_overhead_in_bytes" \| "master" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "name" \| "node.role" \| "pid" \| "port" \| "query_cache.memory_size" \| "query_cache.evictions" \| "query_cache.hit_count" \| "query_cache.miss_count" \| "ram.current" \| "ram.max" \| "ram.percent" \| "refresh.total" \| "refresh.time" \| "request_cache.memory_size" \| "request_cache.evictions" \| "request_cache.hit_count" \| "request_cache.miss_count" \| "script.compilations" \| "script.cache_evictions" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "shard_stats.total_count" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "uptime" \| "version")[])**: A list of columns names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. + +## client.cat.pendingTasks [_cat.pending_tasks] +Get pending task information. + +Get information about cluster-level changes that have not yet taken effect. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks) + +```ts +client.cat.pendingTasks({ ... }) +``` + +### Arguments [_arguments_cat.pending_tasks] + +#### Request (object) [_request_cat.pending_tasks] +- **`h` (Optional, Enum("insertOrder" \| "timeInQueue" \| "priority" \| "source") \| Enum("insertOrder" \| "timeInQueue" \| "priority" \| "source")[])**: A list of columns names to display. It supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.cat.plugins [_cat.plugins] +Get plugin information. + +Get a list of plugins running on each node of a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the nodes info API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins) + +```ts +client.cat.plugins({ ... }) +``` + +### Arguments [_arguments_cat.plugins] + +#### Request (object) [_request_cat.plugins] +- **`h` (Optional, Enum("id" \| "name" \| "component" \| "version" \| "description") \| Enum("id" \| "name" \| "component" \| "version" \| "description")[])**: A list of columns names to display. It supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`include_bootstrap` (Optional, boolean)**: Include bootstrap plugins in the response +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.cat.recovery [_cat.recovery] +Get shard recovery information. + +Get information about ongoing and completed shard recoveries. +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. +For data streams, the API returns information about the stream’s backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery) + +```ts +client.cat.recovery({ ... }) +``` + +### Arguments [_arguments_cat.recovery] + +#### Request (object) [_request_cat.recovery] +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. +- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. 
+- **`h` (Optional, Enum("index" \| "shard" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent") \| Enum("index" \| "shard" \| "start_time" \| "start_time_millis" \| "stop_time" \| "stop_time_millis" \| "time" \| "type" \| "stage" \| "source_host" \| "source_node" \| "target_host" \| "target_node" \| "repository" \| "snapshot" \| "files" \| "files_recovered" \| "files_percent" \| "files_total" \| "bytes" \| "bytes_recovered" \| "bytes_percent" \| "bytes_total" \| "translog_ops" \| "translog_ops_recovered" \| "translog_ops_percent")[])**: A list of columns names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +## client.cat.repositories [_cat.repositories] +Get snapshot repository information. + +Get a list of snapshot repositories for a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories) + +```ts +client.cat.repositories({ ... }) +``` + +### Arguments [_arguments_cat.repositories] + +#### Request (object) [_request_cat.repositories] +- **`h` (Optional, string \| string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.cat.segments [_cat.segments] +Get segment information. + +Get low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments) + +```ts +client.cat.segments({ ... }) +``` + +### Arguments [_arguments_cat.segments] + +#### Request (object) [_request_cat.segments] +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. 
+- **`h` (Optional, Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id") \| Enum("index" \| "shard" \| "prirep" \| "ip" \| "segment" \| "generation" \| "docs.count" \| "docs.deleted" \| "size" \| "size.memory" \| "committed" \| "searchable" \| "version" \| "compound" \| "id")[])**: A list of columns names to display. +It supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument +determines whether wildcard expressions match hidden data streams. Supports a list of values, +such as open,hidden. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only +missing or closed indices. This behavior applies even if the request targets other open indices. For example, +a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`allow_closed` (Optional, boolean)**: If true, allow closed indices to be returned in the response otherwise if false, keep the legacy behaviour +of throwing an exception if index pattern matches closed indices + +## client.cat.shards [_cat.shards] +Get shard information. + +Get information about the shards in a cluster. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards) + +```ts +client.cat.shards({ ... }) +``` + +### Arguments [_arguments_cat.shards] + +#### Request (object) [_request_cat.shards] +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. 
+- **`h` (Optional, Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason") \| Enum("completion.size" \| "dataset.size" \| "dense_vector.value_count" \| "docs" \| "fielddata.evictions" \| "fielddata.memory_size" \| "flush.total" \| "flush.total_time" \| "get.current" \| "get.exists_time" \| "get.exists_total" \| "get.missing_time" \| "get.missing_total" \| "get.time" \| "get.total" \| "id" \| "index" \| "indexing.delete_current" \| "indexing.delete_time" \| "indexing.delete_total" \| "indexing.index_current" \| "indexing.index_failed_due_to_version_conflict" \| "indexing.index_failed" \| "indexing.index_time" \| "indexing.index_total" \| "ip" \| "merges.current" \| "merges.current_docs" \| "merges.current_size" \| "merges.total" \| "merges.total_docs" \| "merges.total_size" \| "merges.total_time" \| "node" \| "prirep" \| "query_cache.evictions" \| "query_cache.memory_size" \| "recoverysource.type" \| "refresh.time" \| "refresh.total" \| "search.fetch_current" \| "search.fetch_time" \| "search.fetch_total" \| "search.open_contexts" \| "search.query_current" \| "search.query_time" \| "search.query_total" \| "search.scroll_current" \| "search.scroll_time" \| "search.scroll_total" \| "segments.count" \| "segments.fixed_bitset_memory" \| "segments.index_writer_memory" \| "segments.memory" \| "segments.version_map_memory" \| "seq_no.global_checkpoint" \| "seq_no.local_checkpoint" \| "seq_no.max" \| "shard" \| "dsparse_vector.value_count" \| "state" \| "store" \| "suggest.current" \| "suggest.time" \| "suggest.total" \| "sync_id" \| "unassigned.at" \| "unassigned.details" \| "unassigned.for" \| "unassigned.reason")[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. 
+
+## client.cat.snapshots [_cat.snapshots]
+Get snapshot information.
+
+Get information about the snapshots stored in one or more repositories.
+A snapshot is a backup of an index or running Elasticsearch cluster.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots)
+
+```ts
+client.cat.snapshots({ ... })
+```
+
+### Arguments [_arguments_cat.snapshots]
+
+#### Request (object) [_request_cat.snapshots]
+- **`repository` (Optional, string \| string[])**: A list of snapshot repositories used to limit the request.
+Accepts wildcard expressions.
+`_all` returns all repositories.
+If any repository fails during the request, Elasticsearch returns an error.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots.
+- **`h` (Optional, Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason") \| Enum("id" \| "repository" \| "status" \| "start_epoch" \| "start_time" \| "end_epoch" \| "end_time" \| "duration" \| "indices" \| "successful_shards" \| "failed_shards" \| "total_shards" \| "reason")[])**: A list of column names to display.
+It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cat.tasks [_cat.tasks]
+Get task information.
+
+Get information about tasks currently running in the cluster.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks)
+
+```ts
+client.cat.tasks({ ... })
+```
+
+### Arguments [_arguments_cat.tasks]
+
+#### Request (object) [_request_cat.tasks]
+- **`actions` (Optional, string[])**: The task action names, which are used to limit the response.
+- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks.
+- **`nodes` (Optional, string[])**: Unique node identifiers, which are used to limit the response.
+- **`parent_task_id` (Optional, string)**: The parent task identifier, which is used to limit the response.
+- **`h` (Optional, Enum("id" \| "action" \| "task_id" \| "parent_task_id" \| "type" \| "start_time" \| "timestamp" \| "running_time_ns" \| "running_time" \| "node_id" \| "ip" \| "port" \| "node" \| "version" \| "x_opaque_id") \| Enum("id" \| "action" \| "task_id" \| "parent_task_id" \| "type" \| "start_time" \| "timestamp" \| "running_time_ns" \| "running_time" \| "node_id" \| "ip" \| "port" \| "node" \| "version" \| "x_opaque_id")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed.
+
+## client.cat.templates [_cat.templates]
+Get index template information.
+
+Get information about the index templates in a cluster.
+You can use index templates to apply index settings and field mappings to new indices at creation.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates)
+
+```ts
+client.cat.templates({ ... })
+```
+
+### Arguments [_arguments_cat.templates]
+
+#### Request (object) [_request_cat.templates]
+- **`name` (Optional, string)**: The name of the template to return.
+Accepts wildcard expressions. If omitted, all templates are returned.
+- **`h` (Optional, Enum("name" \| "index_patterns" \| "order" \| "version" \| "composed_of") \| Enum("name" \| "index_patterns" \| "order" \| "version" \| "composed_of")[])**: A list of column names to display. It supports simple wildcards.
+- **`s` (Optional, string \| string[])**: List of columns that determine how the table should be sorted.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.cat.threadPool [_cat.thread_pool]
+Get thread pool statistics.
+
+Get thread pool statistics for each node in a cluster.
+Returned information includes all built-in thread pools and custom thread pools.
+IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool)
+
+```ts
+client.cat.threadPool({ ... })
+```
+
+### Arguments [_arguments_cat.thread_pool]
+
+#### Request (object) [_request_cat.thread_pool]
+- **`thread_pool_patterns` (Optional, string \| string[])**: A list of thread pool names used to limit the request.
+Accepts wildcard expressions.
+- **`h` (Optional, Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type") \| Enum("active" \| "completed" \| "core" \| "ephemeral_id" \| "host" \| "ip" \| "keep_alive" \| "largest" \| "max" \| "name" \| "node_id" \| "node_name" \| "pid" \| "pool_size" \| "port" \| "queue" \| "queue_size" \| "rejected" \| "size" \| "type")[])**: List of columns to appear in the response. Supports simple wildcards.
+- **`s` (Optional, string \| string[])**: A list of column names or aliases that determines the sort order.
+Sorting defaults to ascending and can be changed by setting `:asc`
+or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the
+local cluster state. If `false`, the list of selected nodes is computed
+from the cluster state of the master node. In both cases the coordinating
+node will send requests for further information to each selected node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+
+## client.cat.transforms [_cat.transforms]
+Get transform information.
+
+Get configuration and usage information about transforms.
+
+CAT APIs are only intended for human consumption using the Kibana
+console or command line. They are not intended for use by applications. For
+application consumption, use the get transform statistics API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms)
+
+```ts
+client.cat.transforms({ ... })
+```
+
+### Arguments [_arguments_cat.transforms]
+
+#### Request (object) [_request_cat.transforms]
+- **`transform_id` (Optional, string)**: A transform identifier or a wildcard expression.
+If you do not specify one of these options, the API returns information for all transforms.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.
+If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches.
+If `false`, the request returns a 404 status code when there are no matches or only partial matches.
+- **`from` (Optional, number)**: Skips the specified number of transforms.
+- **`h` (Optional, Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version") \| Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version")[])**: List of column names to display.
+- **`s` (Optional, Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version") \| Enum("changes_last_detection_time" \| "checkpoint" \| "checkpoint_duration_time_exp_avg" \| "checkpoint_progress" \| "create_time" \| "delete_time" \| "description" \| "dest_index" \| "documents_deleted" \| "documents_indexed" \| "docs_per_second" \| "documents_processed" \| "frequency" \| "id" \| "index_failure" \| "index_time" \| "index_total" \| "indexed_documents_exp_avg" \| "last_search_time" \| "max_page_search_size" \| "pages_processed" \| "pipeline" \| "processed_documents_exp_avg" \| "processing_time" \| "reason" \| "search_failure" \| "search_time" \| "search_total" \| "source_index" \| "state" \| "transform_type" \| "trigger_count" \| "version")[])**: List of column names or column aliases used to sort the response. +- **`size` (Optional, number)**: The maximum number of transforms to obtain. + +## client.ccr.deleteAutoFollowPattern [_ccr.delete_auto_follow_pattern] +Delete auto-follow patterns. + +Delete a collection of cross-cluster replication auto-follow patterns. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern) + +```ts +client.ccr.deleteAutoFollowPattern({ name }) +``` + +### Arguments [_arguments_ccr.delete_auto_follow_pattern] + +#### Request (object) [_request_ccr.delete_auto_follow_pattern] +- **`name` (string)**: The auto-follow pattern collection to delete. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +## client.ccr.follow [_ccr.follow] +Create a follower. +Create a cross-cluster replication follower index that follows a specific leader index. +When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow) + +```ts +client.ccr.follow({ index, leader_index, remote_cluster }) +``` + +### Arguments [_arguments_ccr.follow] + +#### Request (object) [_request_ccr.follow] +- **`index` (string)**: The name of the follower index. +- **`leader_index` (string)**: The name of the index in the leader cluster to follow. +- **`remote_cluster` (string)**: The remote cluster containing the leader index. +- **`data_stream_name` (Optional, string)**: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. +- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. 
+- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower.
+- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster.
+- **`max_read_request_size` (Optional, number \| string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
+- **`max_retry_delay` (Optional, string \| -1 \| 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when
+retrying.
+- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be
+deferred until the number of queued operations goes below the limit.
+- **`max_write_buffer_size` (Optional, number \| string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will
+be deferred until the total bytes of queued operations goes below the limit.
+- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower.
+- **`max_write_request_size` (Optional, number \| string)**: The maximum total bytes of operations per bulk write request executed on the follower.
+- **`read_poll_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index.
+When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics.
+Then the follower will immediately attempt to read from the leader again.
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Settings to override from the leader index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: Specifies the number of shards that must be active before the request responds. By default it does not wait for any shards to be active.
+A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the
+remote Lucene segment files to the follower index.
+
+## client.ccr.followInfo [_ccr.follow_info]
+Get follower information.
+
+Get information about all cross-cluster replication follower indices.
+For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info)
+
+```ts
+client.ccr.followInfo({ index })
+```
+
+### Arguments [_arguments_ccr.follow_info]
+
+#### Request (object) [_request_ccr.follow_info]
+- **`index` (string \| string[])**: A comma-delimited list of follower index patterns.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+
+## client.ccr.followStats [_ccr.follow_stats]
+Get follower stats.
+
+Get cross-cluster replication follower stats.
+The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats)
+
+```ts
+client.ccr.followStats({ index })
+```
+
+### Arguments [_arguments_ccr.follow_stats]
+
+#### Request (object) [_request_ccr.follow_stats]
+- **`index` (string \| string[])**: A comma-delimited list of index patterns.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ccr.forgetFollower [_ccr.forget_follower]
+Forget a follower.
+Remove the cross-cluster replication follower retention leases from the leader.
+
+A following index takes out retention leases on its leader index.
+These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need in order to run replication.
+When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed.
+However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable.
+While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index.
+This API exists to enable manually removing the leases when the unfollow API is unable to do so.
+
+NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader.
+The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower)
+
+```ts
+client.ccr.forgetFollower({ index })
+```
+
+### Arguments [_arguments_ccr.forget_follower]
+
+#### Request (object) [_request_ccr.forget_follower]
+- **`index` (string)**: The name of the leader index for which the specified follower retention leases should be removed.
+- **`follower_cluster` (Optional, string)**
+- **`follower_index` (Optional, string)**
+- **`follower_index_uuid` (Optional, string)**
+- **`leader_remote_cluster` (Optional, string)**
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
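+
+As a sketch, a cleanup call after a failed unfollow might look like the following; every index, cluster, and UUID value here is a hypothetical placeholder, and the UUID would come from the follower's index metadata:
+
+```ts
+// Sketch only: all names and the UUID below are placeholders.
+await client.ccr.forgetFollower({
+  index: 'leader-index',                         // leader index holding the leftover leases
+  follower_cluster: 'follower-cluster',
+  follower_index: 'follower-index',
+  follower_index_uuid: 'WmSGW9e2TQuzJnm6Lxg0fA', // UUID of the (possibly deleted) follower index
+  leader_remote_cluster: 'leader-cluster'        // remote cluster alias as seen from the follower
+})
+```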
+ +## client.ccr.getAutoFollowPattern [_ccr.get_auto_follow_pattern] +Get auto-follow patterns. + +Get cross-cluster replication auto-follow patterns. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1) + +```ts +client.ccr.getAutoFollowPattern({ ... }) +``` + +### Arguments [_arguments_ccr.get_auto_follow_pattern] + +#### Request (object) [_request_ccr.get_auto_follow_pattern] +- **`name` (Optional, string)**: The auto-follow pattern collection that you want to retrieve. +If you do not specify a name, the API returns information for all collections. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +## client.ccr.pauseAutoFollowPattern [_ccr.pause_auto_follow_pattern] +Pause an auto-follow pattern. + +Pause a cross-cluster replication auto-follow pattern. +When the API returns, the auto-follow pattern is inactive. +New indices that are created on the remote cluster and match the auto-follow patterns are ignored. + +You can resume auto-following with the resume auto-follow pattern API. +When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. +Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern) + +```ts +client.ccr.pauseAutoFollowPattern({ name }) +``` + +### Arguments [_arguments_ccr.pause_auto_follow_pattern] + +#### Request (object) [_request_ccr.pause_auto_follow_pattern] +- **`name` (string)**: The name of the auto-follow pattern to pause. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +## client.ccr.pauseFollow [_ccr.pause_follow] +Pause a follower. + +Pause a cross-cluster replication follower index. +The follower index will not fetch any additional operations from the leader index. +You can resume following with the resume follower API. +You can pause and resume a follower index to change the configuration of the following task. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow) + +```ts +client.ccr.pauseFollow({ index }) +``` + +### Arguments [_arguments_ccr.pause_follow] + +#### Request (object) [_request_ccr.pause_follow] +- **`index` (string)**: The name of the follower index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +## client.ccr.putAutoFollowPattern [_ccr.put_auto_follow_pattern] +Create or update auto-follow patterns. +Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. 
+Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices.
+Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern.
+
+This API can also be used to update auto-follow patterns.
+NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern)
+
+```ts
+client.ccr.putAutoFollowPattern({ name, remote_cluster })
+```
+
+### Arguments [_arguments_ccr.put_auto_follow_pattern]
+
+#### Request (object) [_request_ccr.put_auto_follow_pattern]
+- **`name` (string)**: The name of the collection of auto-follow patterns.
+- **`remote_cluster` (string)**: The remote cluster containing the leader indices to match against.
+- **`follow_index_pattern` (Optional, string)**: The name of the follower index. The template `{{leader_index}}` can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use `{{leader_index}}`; CCR does not support changes to the names of a follower data stream’s backing indices.
+- **`leader_index_patterns` (Optional, string[])**: An array of simple index patterns to match against indices in the remote cluster specified by the `remote_cluster` field.
+- **`leader_index_exclusion_patterns` (Optional, string[])**: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more `leader_index_patterns` and one or more `leader_index_exclusion_patterns` won’t be followed.
+- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding read requests from the remote cluster.
+- **`settings` (Optional, Record)**: Settings to override from the leader index. Note that certain settings cannot be overridden (for example, `index.number_of_shards`).
+- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower.
+- **`read_poll_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
+- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster.
+- **`max_read_request_size` (Optional, number \| string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
+- **`max_retry_delay` (Optional, string \| -1 \| 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
+- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
+- **`max_write_buffer_size` (Optional, number \| string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
+- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower.
+- **`max_write_request_size` (Optional, number \| string)**: The maximum total bytes of operations per bulk write request executed on the follower.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.ccr.resumeAutoFollowPattern [_ccr.resume_auto_follow_pattern]
+Resume an auto-follow pattern.
+
+Resume a cross-cluster replication auto-follow pattern that was paused.
+The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster.
+Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern)
+
+```ts
+client.ccr.resumeAutoFollowPattern({ name })
+```
+
+### Arguments [_arguments_ccr.resume_auto_follow_pattern]
+
+#### Request (object) [_request_ccr.resume_auto_follow_pattern]
+- **`name` (string)**: The name of the auto-follow pattern to resume.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+
+## client.ccr.resumeFollow [_ccr.resume_follow]
+Resume a follower.
+Resume a cross-cluster replication follower index that was paused.
+The follower index could have been paused with the pause follower API.
+Alternatively, it could have been paused because replication could not be retried after failures during the following task.
+When this API returns, the follower index will resume fetching operations from the leader index.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow)
+
+```ts
+client.ccr.resumeFollow({ index })
+```
+
+### Arguments [_arguments_ccr.resume_follow]
+
+#### Request (object) [_request_ccr.resume_follow]
+- **`index` (string)**: The name of the follower index to resume following.
+- **`max_outstanding_read_requests` (Optional, number)**
+- **`max_outstanding_write_requests` (Optional, number)**
+- **`max_read_request_operation_count` (Optional, number)**
+- **`max_read_request_size` (Optional, string)**
+- **`max_retry_delay` (Optional, string \| -1 \| 0)**
+- **`max_write_buffer_count` (Optional, number)**
+- **`max_write_buffer_size` (Optional, string)**
+- **`max_write_request_operation_count` (Optional, number)**
+- **`max_write_request_size` (Optional, string)**
+- **`read_poll_timeout` (Optional, string \| -1 \| 0)**
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.ccr.stats [_ccr.stats]
+Get cross-cluster replication stats.
+
+This API returns stats about auto-following and the same shard-level stats as the get follower stats API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats)
+
+```ts
+client.ccr.stats({ ...
}) +``` + +### Arguments [_arguments_ccr.stats] + +#### Request (object) [_request_ccr.stats] +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.ccr.unfollow [_ccr.unfollow] +Unfollow an index. + +Convert a cross-cluster replication follower index to a regular index. +The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +The follower index must be paused and closed before you call the unfollow API. + +> info +> Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow) + +```ts +client.ccr.unfollow({ index }) +``` + +### Arguments [_arguments_ccr.unfollow] + +#### Request (object) [_request_ccr.unfollow] +- **`index` (string)**: The name of the follower index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +## client.cluster.allocationExplain [_cluster.allocation_explain] +Explain the shard allocations. +Get explanations for shard allocations in the cluster. +This API accepts the current_node, index, primary and shard parameters in the request body or in query parameters, but not in both at the same time. +For unassigned shards, it provides an explanation for why the shard is unassigned. +For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. +This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. +Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) + +```ts +client.cluster.allocationExplain({ ... }) +``` + +### Arguments [_arguments_cluster.allocation_explain] + +#### Request (object) [_request_cluster.allocation_explain] +- **`index` (Optional, string)**: The name of the index that you would like an explanation for. +- **`shard` (Optional, number)**: An identifier for the shard that you would like an explanation for. +- **`primary` (Optional, boolean)**: If true, returns an explanation for the primary shard for the specified shard ID. +- **`current_node` (Optional, string)**: Explain a shard only if it is currently located on the specified node name or node ID. +- **`include_disk_info` (Optional, boolean)**: If true, returns information about disk usage and shard sizes. +- **`include_yes_decisions` (Optional, boolean)**: If true, returns YES decisions in explanation. 
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.cluster.deleteComponentTemplate [_cluster.delete_component_template] +Delete component templates. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) + +```ts +client.cluster.deleteComponentTemplate({ name }) +``` + +### Arguments [_arguments_cluster.delete_component_template] + +#### Request (object) [_request_cluster.delete_component_template] +- **`name` (string \| string[])**: List or wildcard expression of component template names used to limit the request. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.cluster.deleteVotingConfigExclusions [_cluster.delete_voting_config_exclusions] +Clear cluster voting config exclusions. +Remove master-eligible nodes from the voting configuration exclusion list. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) + +```ts +client.cluster.deleteVotingConfigExclusions({ ... }) +``` + +### Arguments [_arguments_cluster.delete_voting_config_exclusions] + +#### Request (object) [_request_cluster.delete_voting_config_exclusions] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`wait_for_removal` (Optional, boolean)**: Specifies whether to wait for all excluded nodes to be removed from the +cluster before clearing the voting configuration exclusions list. +Defaults to true, meaning that all excluded nodes must be removed from +the cluster before this API takes any action. If set to false then the +voting configuration exclusions list is cleared even if some excluded +nodes are still in the cluster. + +## client.cluster.existsComponentTemplate [_cluster.exists_component_template] +Check component templates. +Returns information about whether a particular component template exists. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) + +```ts +client.cluster.existsComponentTemplate({ name }) +``` + +### Arguments [_arguments_cluster.exists_component_template] + +#### Request (object) [_request_cluster.exists_component_template] +- **`name` (string \| string[])**: List of component template names used to limit the request. +Wildcard (*) expressions are supported. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. +Defaults to false, which means information is retrieved from the master node. + +## client.cluster.getComponentTemplate [_cluster.get_component_template] +Get component templates. +Get information about component templates. 
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template)
+
+```ts
+client.cluster.getComponentTemplate({ ... })
+```
+
+### Arguments [_arguments_cluster.get_component_template]
+
+#### Request (object) [_request_cluster.get_component_template]
+- **`name` (Optional, string)**: List of component template names used to limit the request.
+Wildcard (`*`) expressions are supported.
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`settings_filter` (Optional, string \| string[])**: Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys.
+- **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false).
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+If `false`, information is retrieved from the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.cluster.getSettings [_cluster.get_settings]
+Get cluster-wide settings.
+
+By default, it returns only settings that have been explicitly defined.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings)
+
+```ts
+client.cluster.getSettings({ ... })
+```
+
+### Arguments [_arguments_cluster.get_settings]
+
+#### Request (object) [_request_cluster.get_settings]
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`include_defaults` (Optional, boolean)**: If `true`, also returns default values for all other cluster settings, reflecting the values
+in the `elasticsearch.yml` file of one of the nodes in the cluster. If the nodes in your
+cluster do not all have the same values in their `elasticsearch.yml` config files then the
+values returned by this API may vary from invocation to invocation and may not reflect the
+values that Elasticsearch uses in all situations. Use the `GET _nodes/settings` API to
+fetch the settings for each individual node in your cluster.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.cluster.health [_cluster.health]
+Get the cluster health status.
+
+You can also use the API to get the health status of only specified data streams and indices.
+For data streams, the API retrieves the health status of the stream’s backing indices.
+
+The cluster health status is green, yellow, or red.
+On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.
+The index level status is controlled by the worst shard status.
+
+One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.
+The cluster status is controlled by the worst index status.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health)
+
+```ts
+client.cluster.health({ ... })
+```
+
+### Arguments [_arguments_cluster.health]
+
+#### Request (object) [_request_cluster.health]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
+- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: A number controlling how many active shards to wait for, `all` to wait for all shards in the cluster to be active, or `0` to not wait.
+- **`wait_for_events` (Optional, Enum("immediate" \| "urgent" \| "high" \| "normal" \| "low" \| "languid"))**: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
+- **`wait_for_nodes` (Optional, string \| number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N, and <N.
+- **`wait_for_status` (Optional, Enum("green" \| "yellow" \| "red"))**: Waits (until the timeout provided) until the status of the cluster changes to the one provided or better, that is green > yellow > red. By default, will not wait for any status.
+
+## client.cluster.info [_cluster.info]
+Get cluster info.
+Returns basic information about the cluster.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info)
+
+```ts
+client.cluster.info({ target })
+```
+
+### Arguments [_arguments_cluster.info]
+
+#### Request (object) [_request_cluster.info]
+- **`target` (Enum("_all" \| "http" \| "ingest" \| "thread_pool" \| "script") \| Enum("_all" \| "http" \| "ingest" \| "thread_pool" \| "script")[])**: Limits the information returned to the specific target. Supports a list, such as `http,ingest`.
+
+## client.cluster.pendingTasks [_cluster.pending_tasks]
+Get the pending cluster tasks.
+Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.
+
+NOTE: This API returns a list of any pending updates to the cluster state.
+These are distinct from the tasks reported by the task management API, which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests.
+However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task management API and the pending cluster tasks API.
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks) + +```ts +client.cluster.pendingTasks({ ... }) +``` + +### Arguments [_arguments_cluster.pending_tasks] + +#### Request (object) [_request_cluster.pending_tasks] +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +If `false`, information is retrieved from the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.cluster.postVotingConfigExclusions [_cluster.post_voting_config_exclusions] +Update voting configuration exclusions. +Update the cluster voting config exclusions by node IDs or node names. +By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. +If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. +The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. +It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. + +Clusters should have no voting configuration exclusions in normal operation. +Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. +This API waits for the nodes to be fully removed from the cluster before it returns. +If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. + +A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. +If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. +In that case, you may safely retry the call. + +NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. +They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) + +```ts +client.cluster.postVotingConfigExclusions({ ... }) +``` + +### Arguments [_arguments_cluster.post_voting_config_exclusions] + +#### Request (object) [_request_cluster.post_voting_config_exclusions] +- **`node_names` (Optional, string \| string[])**: A list of the names of the nodes to exclude from the +voting configuration. If specified, you may not also specify node_ids. +- **`node_ids` (Optional, string \| string[])**: A list of the persistent ids of the nodes to exclude +from the voting configuration. 
If specified, you may not also specify node_names.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+- **`timeout` (Optional, string \| -1 \| 0)**: When adding a voting configuration exclusion, the API waits for the
+specified nodes to be excluded from the voting configuration before
+returning. If the timeout expires before the appropriate condition
+is satisfied, the request fails and returns an error.
+
+## client.cluster.putComponentTemplate [_cluster.put_component_template]
+Create or update a component template.
+Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
+
+An index template can be composed of multiple component templates.
+To use a component template, specify it in an index template’s `composed_of` list.
+Component templates are only applied to new data streams and indices as part of a matching index template.
+
+Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.
+
+Component templates are only used during index creation.
+For data streams, this includes data stream creation and the creation of a stream’s backing indices.
+Changes to component templates do not affect existing indices, including a stream’s backing indices.
+
+You can use C-style `/* *\/` block comments in component templates.
+You can include comments anywhere in the request body except before the opening curly bracket.
+
+**Applying component templates**
+
+You cannot directly apply a component template to a data stream or index.
+To be applied, a component template must be included in an index template's `composed_of` list.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template)
+
+```ts
+client.cluster.putComponentTemplate({ name, template })
+```
+
+### Arguments [_arguments_cluster.put_component_template]
+
+#### Request (object) [_request_cluster.put_component_template]
+- **`name` (string)**: Name of the component template to create.
+Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mapping`; `synthetics-settings`.
+Elastic Agent uses these templates to configure backing indices for its data streams.
+If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version.
+If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API.
+- **`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })**: The template to be applied, which includes mappings, settings, or aliases configuration.
+- **`version` (Optional, number)**: Version number used to manage component templates externally.
+This number isn't automatically generated or incremented by Elasticsearch.
+To unset a version, replace the template without specifying a version.
+- **`_meta` (Optional, Record)**: Optional user metadata about the component template.
+It may have any contents. This map is not automatically generated by Elasticsearch.
+This information is stored in the cluster state, so keeping it short is preferable.
+To unset `_meta`, replace the template without specifying this information.
+- **`deprecated` (Optional, boolean)**: Marks this component template as deprecated. When creating or updating a non-deprecated index template
+that uses deprecated components, Elasticsearch will emit a deprecation warning.
+- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing component templates.
+- **`cause` (Optional, string)**: User-defined reason for creating the component template.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.cluster.putSettings [_cluster.put_settings]
+Update the cluster settings.
+
+Configure and update dynamic settings on a running cluster.
+You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`.
+
+Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart.
+You can also reset transient or persistent settings by assigning them a null value.
+
+If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value.
+For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting.
+However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting.
+
+TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster.
+If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings.
+Only use `elasticsearch.yml` for static cluster settings and node settings.
+The API doesn’t require a restart and ensures a setting’s value is the same on all nodes.
+
+WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead.
+If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings)
+
+```ts
+client.cluster.putSettings({ ... })
+```
+
+### Arguments [_arguments_cluster.put_settings]
+
+#### Request (object) [_request_cluster.put_settings]
+- **`persistent` (Optional, Record)**: The settings that persist after the cluster restarts.
+- **`transient` (Optional, Record)**: The settings that do not persist after the cluster restarts.
+- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false).
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout for connection to master node.
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout.
+
+## client.cluster.remoteInfo [_cluster.remote_info]
+Get remote cluster information.
+
+Get information about configured remote clusters.
+The API returns connection and endpoint information keyed by the configured remote cluster alias.
+
+> info
+> This API returns information that reflects current state on the local cluster.
+> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it.
+> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. +> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info) + +```ts +client.cluster.remoteInfo() +``` + + +## client.cluster.reroute [_cluster.reroute] +Reroute the cluster. +Manually change the allocation of individual shards in the cluster. +For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. + +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. +For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. + +The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. +If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. + +The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. +This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. + +Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute) + +```ts +client.cluster.reroute({ ... }) +``` + +### Arguments [_arguments_cluster.reroute] + +#### Request (object) [_request_cluster.reroute] +- **`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])**: Defines the commands to perform. +- **`dry_run` (Optional, boolean)**: If true, then the request simulates the operation. +It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. +- **`explain` (Optional, boolean)**: If true, then the response contains an explanation of why the commands can or cannot run. +- **`metric` (Optional, string \| string[])**: Limits the information returned to the specified metrics. +- **`retry_failed` (Optional, boolean)**: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
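+
+As an illustration, a minimal sketch of a dry-run `move` command (assuming a configured `client`; the index and node names are placeholders):
+
+```ts
+// Sketch: simulate moving shard 0 of "my-index" from node1 to node2
+// without applying the change, and ask for explanations in the response
+const result = await client.cluster.reroute({
+  dry_run: true,
+  explain: true,
+  commands: [
+    { move: { index: 'my-index', shard: 0, from_node: 'node1', to_node: 'node2' } }
+  ]
+})
+```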
+ +## client.cluster.state [_cluster.state] +Get the cluster state. +Get comprehensive information about the state of the cluster. + +The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. + +The elected master node ensures that every node in the cluster has a copy of the same cluster state. +This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. +You may need to consult the Elasticsearch source code to determine the precise meaning of the response. + +By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. +You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. + +Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. +If you use this API repeatedly, your cluster may become unstable. + +WARNING: The response is a representation of an internal data structure. +Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. +Do not query this API using external monitoring tools. +Instead, obtain the information you require using other more stable cluster APIs. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state) + +```ts +client.cluster.state({ ... }) +``` + +### Arguments [_arguments_cluster.state] + +#### Request (object) [_request_cluster.state] +- **`metric` (Optional, Enum("_all" \| "version" \| "master_node" \| "blocks" \| "nodes" \| "metadata" \| "routing_table" \| "routing_nodes" \| "customs") \| Enum("_all" \| "version" \| "master_node" \| "blocks" \| "nodes" \| "metadata" \| "routing_table" \| "routing_nodes" \| "customs")[])**: Limit the information returned to the specified metrics +- **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false)
+- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+- **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false)
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Timeout for waiting for new cluster state in case it is blocked
+- **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version
+- **`wait_for_timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for wait_for_metadata_version before timing out
+
+## client.cluster.stats [_cluster.stats]
+Get cluster statistics.
+Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, OS, JVM versions, memory usage, CPU, and installed plugins).
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats)
+
+```ts
+client.cluster.stats({ ... })
+```
+
+### Arguments [_arguments_cluster.stats]
+
+#### Request (object) [_request_cluster.stats]
+- **`node_id` (Optional, string \| string[])**: List of node filters used to limit returned information. Defaults to all nodes in the cluster.
+- **`include_remotes` (Optional, boolean)**: Include remote cluster data in the response.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for each node to respond.
+If a node does not respond before its timeout expires, the response does not include its stats.
+However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout.
+
+## client.connector.checkIn [_connector.check_in]
+Check in a connector.
+
+Update the `last_seen` field in the connector and set it to the current timestamp.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in)
+
+```ts
+client.connector.checkIn({ connector_id })
+```
+
+### Arguments [_arguments_connector.check_in]
+
+#### Request (object) [_request_connector.check_in]
+- **`connector_id` (string)**: The unique identifier of the connector to be checked in
+
+## client.connector.delete [_connector.delete]
+Delete a connector.
+
+Removes a connector and associated sync jobs.
+This is a destructive action that is not recoverable.
+NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector.
+These need to be removed manually.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete)
+
+```ts
+client.connector.delete({ connector_id })
+```
+
+### Arguments [_arguments_connector.delete]
+
+#### Request (object) [_request_connector.delete]
+- **`connector_id` (string)**: The unique identifier of the connector to be deleted
+- **`delete_sync_jobs` (Optional, boolean)**: A flag indicating if associated sync jobs should also be removed.
+- **`hard` (Optional, boolean)**: A flag indicating if the connector should be hard deleted.
+
+## client.connector.get [_connector.get]
+Get a connector.
+
+Get the details about a connector.
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get) + +```ts +client.connector.get({ connector_id }) +``` + +### Arguments [_arguments_connector.get] + +#### Request (object) [_request_connector.get] +- **`connector_id` (string)**: The unique identifier of the connector +- **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. + +## client.connector.list [_connector.list] +Get all connectors. + +Get information about all connectors. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list) + +```ts +client.connector.list({ ... }) +``` + +### Arguments [_arguments_connector.list] + +#### Request (object) [_request_connector.list] +- **`from` (Optional, number)**: Starting offset +- **`size` (Optional, number)**: Specifies a max number of results to get +- **`index_name` (Optional, string \| string[])**: A list of connector index names to fetch connector documents for +- **`connector_name` (Optional, string \| string[])**: A list of connector names to fetch connector documents for +- **`service_type` (Optional, string \| string[])**: A list of connector service types to fetch connector documents for +- **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. +- **`query` (Optional, string)**: A wildcard query string that filters connectors with matching name, description or index name + +## client.connector.post [_connector.post] +Create a connector. + +Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. +Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. +Self-managed connectors (Connector clients) are self-managed on your infrastructure. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put) + +```ts +client.connector.post({ ... }) +``` + +### Arguments [_arguments_connector.post] + +#### Request (object) [_request_connector.post] +- **`description` (Optional, string)** +- **`index_name` (Optional, string)** +- **`is_native` (Optional, boolean)** +- **`language` (Optional, string)** +- **`name` (Optional, string)** +- **`service_type` (Optional, string)** + +## client.connector.put [_connector.put] +Create or update a connector. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put) + +```ts +client.connector.put({ ... }) +``` + +### Arguments [_arguments_connector.put] + +#### Request (object) [_request_connector.put] +- **`connector_id` (Optional, string)**: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. +- **`description` (Optional, string)** +- **`index_name` (Optional, string)** +- **`is_native` (Optional, boolean)** +- **`language` (Optional, string)** +- **`name` (Optional, string)** +- **`service_type` (Optional, string)** + +## client.connector.syncJobCancel [_connector.sync_job_cancel] +Cancel a connector sync job. + +Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. +The connector service is then responsible for setting the status of connector sync jobs to cancelled. 
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel) + +```ts +client.connector.syncJobCancel({ connector_sync_job_id }) +``` + +### Arguments [_arguments_connector.sync_job_cancel] + +#### Request (object) [_request_connector.sync_job_cancel] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job + +## client.connector.syncJobCheckIn [_connector.sync_job_check_in] +Check in a connector sync job. +Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in) + +```ts +client.connector.syncJobCheckIn({ connector_sync_job_id }) +``` + +### Arguments [_arguments_connector.sync_job_check_in] + +#### Request (object) [_request_connector.sync_job_check_in] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be checked in. + +## client.connector.syncJobClaim [_connector.sync_job_claim] +Claim a connector sync job. +This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. +Additionally, it can set the `sync_cursor` property for the sync job. + +This API is not intended for direct connector management by users. +It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim) + +```ts +client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) +``` + +### Arguments [_arguments_connector.sync_job_claim] + +#### Request (object) [_request_connector.sync_job_claim] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. +- **`worker_hostname` (string)**: The host name of the current system that will run the job. +- **`sync_cursor` (Optional, User-defined value)**: The cursor object from the last incremental sync job. +This should reference the `sync_cursor` field in the connector state for which the job runs. + +## client.connector.syncJobDelete [_connector.sync_job_delete] +Delete a connector sync job. + +Remove a connector sync job and its associated data. +This is a destructive action that is not recoverable. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete) + +```ts +client.connector.syncJobDelete({ connector_sync_job_id }) +``` + +### Arguments [_arguments_connector.sync_job_delete] + +#### Request (object) [_request_connector.sync_job_delete] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be deleted + +## client.connector.syncJobError [_connector.sync_job_error] +Set a connector sync job error. +Set the `error` field for a connector sync job and set its `status` to `error`. 
+ +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error) + +```ts +client.connector.syncJobError({ connector_sync_job_id, error }) +``` + +### Arguments [_arguments_connector.sync_job_error] + +#### Request (object) [_request_connector.sync_job_error] +- **`connector_sync_job_id` (string)**: The unique identifier for the connector sync job. +- **`error` (string)**: The error for the connector sync job error field. + +## client.connector.syncJobGet [_connector.sync_job_get] +Get a connector sync job. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get) + +```ts +client.connector.syncJobGet({ connector_sync_job_id }) +``` + +### Arguments [_arguments_connector.sync_job_get] + +#### Request (object) [_request_connector.sync_job_get] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job + +## client.connector.syncJobList [_connector.sync_job_list] +Get all connector sync jobs. + +Get information about all stored connector sync jobs listed by their creation date in ascending order. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list) + +```ts +client.connector.syncJobList({ ... }) +``` + +### Arguments [_arguments_connector.sync_job_list] + +#### Request (object) [_request_connector.sync_job_list] +- **`from` (Optional, number)**: Starting offset +- **`size` (Optional, number)**: Specifies a max number of results to get +- **`status` (Optional, Enum("canceling" \| "canceled" \| "completed" \| "error" \| "in_progress" \| "pending" \| "suspended"))**: A sync job status to fetch connector sync jobs for +- **`connector_id` (Optional, string)**: A connector id to fetch connector sync jobs for +- **`job_type` (Optional, Enum("full" \| "incremental" \| "access_control") \| Enum("full" \| "incremental" \| "access_control")[])**: A list of job types to fetch the sync jobs for + +## client.connector.syncJobPost [_connector.sync_job_post] +Create a connector sync job. + +Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post) + +```ts +client.connector.syncJobPost({ id }) +``` + +### Arguments [_arguments_connector.sync_job_post] + +#### Request (object) [_request_connector.sync_job_post] +- **`id` (string)**: The id of the associated connector +- **`job_type` (Optional, Enum("full" \| "incremental" \| "access_control"))** +- **`trigger_method` (Optional, Enum("on_demand" \| "scheduled"))** + +## client.connector.syncJobUpdateStats [_connector.sync_job_update_stats] +Set the connector sync job stats. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. +You can also update `last_seen`. +This API is mainly used by the connector service for updating sync job information. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. 
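+
+For example, a minimal sketch of a progress report (assuming a configured `client`; the job ID and counts are placeholders):
+
+```ts
+// Sketch: report interim counters for a running sync job
+await client.connector.syncJobUpdateStats({
+  connector_sync_job_id: 'my-sync-job-id',
+  deleted_document_count: 0,
+  indexed_document_count: 1250,
+  indexed_document_volume: 64, // MiB indexed so far
+  total_document_count: 1250
+})
+```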
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats) + +```ts +client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) +``` + +### Arguments [_arguments_connector.sync_job_update_stats] + +#### Request (object) [_request_connector.sync_job_update_stats] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. +- **`deleted_document_count` (number)**: The number of documents the sync job deleted. +- **`indexed_document_count` (number)**: The number of documents the sync job indexed. +- **`indexed_document_volume` (number)**: The total size of the data (in MiB) the sync job indexed. +- **`last_seen` (Optional, string \| -1 \| 0)**: The timestamp to use in the `last_seen` property for the connector sync job. +- **`metadata` (Optional, Record)**: The connector-specific metadata. +- **`total_document_count` (Optional, number)**: The total number of documents in the target index after the sync job finished. + +## client.connector.updateActiveFiltering [_connector.update_active_filtering] +Activate the connector draft filter. + +Activates the valid draft filtering for a connector. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering) + +```ts +client.connector.updateActiveFiltering({ connector_id }) +``` + +### Arguments [_arguments_connector.update_active_filtering] + +#### Request (object) [_request_connector.update_active_filtering] +- **`connector_id` (string)**: The unique identifier of the connector to be updated + +## client.connector.updateApiKeyId [_connector.update_api_key_id] +Update the connector API key ID. + +Update the `api_key_id` and `api_key_secret_id` fields of a connector. +You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. +The connector secret ID is required only for Elastic managed (native) connectors. +Self-managed connectors (connector clients) do not use this field. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id) + +```ts +client.connector.updateApiKeyId({ connector_id }) +``` + +### Arguments [_arguments_connector.update_api_key_id] + +#### Request (object) [_request_connector.update_api_key_id] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`api_key_id` (Optional, string)** +- **`api_key_secret_id` (Optional, string)** + +## client.connector.updateConfiguration [_connector.update_configuration] +Update the connector configuration. + +Update the configuration field in the connector document. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration) + +```ts +client.connector.updateConfiguration({ connector_id }) +``` + +### Arguments [_arguments_connector.update_configuration] + +#### Request (object) [_request_connector.update_configuration] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`configuration` (Optional, Record)** +- **`values` (Optional, Record)** + +## client.connector.updateError [_connector.update_error] +Update the connector error field. + +Set the error field for the connector. +If the error provided in the request body is non-null, the connector’s status is updated to error. 
+Otherwise, if the error is reset to null, the connector status is updated to connected.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error)
+
+```ts
+client.connector.updateError({ connector_id, error })
+```
+
+### Arguments [_arguments_connector.update_error]
+
+#### Request (object) [_request_connector.update_error]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`error` (T \| null)**
+
+## client.connector.updateFeatures [_connector.update_features]
+Update the connector features.
+Update the connector features in the connector document.
+This API can be used to control the following aspects of a connector:
+
+* document-level security
+* incremental syncs
+* advanced sync rules
+* basic sync rules
+
+Normally, the running connector service automatically manages these features.
+However, you can use this API to override the default behavior.
+
+To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
+This service runs automatically on Elastic Cloud for Elastic managed connectors.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features)
+
+```ts
+client.connector.updateFeatures({ connector_id, features })
+```
+
+### Arguments [_arguments_connector.update_features]
+
+#### Request (object) [_request_connector.update_features]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated.
+- **`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })**
+
+## client.connector.updateFiltering [_connector.update_filtering]
+Update the connector filtering.
+
+Update the draft filtering configuration of a connector and mark the draft validation state as edited.
+The filtering draft is activated once validated by the running Elastic connector service.
+The filtering property is used to configure sync rules (both basic and advanced) for a connector.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering)
+
+```ts
+client.connector.updateFiltering({ connector_id })
+```
+
+### Arguments [_arguments_connector.update_filtering]
+
+#### Request (object) [_request_connector.update_filtering]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`filtering` (Optional, { active, domain, draft }[])**
+- **`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])**
+- **`advanced_snippet` (Optional, { created_at, updated_at, value })**
+
+## client.connector.updateFilteringValidation [_connector.update_filtering_validation]
+Update the connector draft filtering validation.
+
+Update the draft filtering validation info for a connector.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation)
+
+```ts
+client.connector.updateFilteringValidation({ connector_id, validation })
+```
+
+### Arguments [_arguments_connector.update_filtering_validation]
+
+#### Request (object) [_request_connector.update_filtering_validation]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`validation` ({ errors, state })**
+
+## client.connector.updateIndexName [_connector.update_index_name]
+Update the connector index name.
+ +Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name) + +```ts +client.connector.updateIndexName({ connector_id, index_name }) +``` + +### Arguments [_arguments_connector.update_index_name] + +#### Request (object) [_request_connector.update_index_name] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`index_name` (T \| null)** + +## client.connector.updateName [_connector.update_name] +Update the connector name and description. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name) + +```ts +client.connector.updateName({ connector_id }) +``` + +### Arguments [_arguments_connector.update_name] + +#### Request (object) [_request_connector.update_name] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`name` (Optional, string)** +- **`description` (Optional, string)** + +## client.connector.updateNative [_connector.update_native] +Update the connector is_native flag. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native) + +```ts +client.connector.updateNative({ connector_id, is_native }) +``` + +### Arguments [_arguments_connector.update_native] + +#### Request (object) [_request_connector.update_native] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`is_native` (boolean)** + +## client.connector.updatePipeline [_connector.update_pipeline] +Update the connector pipeline. + +When you create a new connector, the configuration of an ingest pipeline is populated with default settings. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline) + +```ts +client.connector.updatePipeline({ connector_id, pipeline }) +``` + +### Arguments [_arguments_connector.update_pipeline] + +#### Request (object) [_request_connector.update_pipeline] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })** + +## client.connector.updateScheduling [_connector.update_scheduling] +Update the connector scheduling. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling) + +```ts +client.connector.updateScheduling({ connector_id, scheduling }) +``` + +### Arguments [_arguments_connector.update_scheduling] + +#### Request (object) [_request_connector.update_scheduling] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`scheduling` ({ access_control, full, incremental })** + +## client.connector.updateServiceType [_connector.update_service_type] +Update the connector service type. 
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type)
+
+```ts
+client.connector.updateServiceType({ connector_id, service_type })
+```
+
+### Arguments [_arguments_connector.update_service_type]
+
+#### Request (object) [_request_connector.update_service_type]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`service_type` (string)**
+
+## client.connector.updateStatus [_connector.update_status]
+Update the connector status.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status)
+
+```ts
+client.connector.updateStatus({ connector_id, status })
+```
+
+### Arguments [_arguments_connector.update_status]
+
+#### Request (object) [_request_connector.update_status]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`status` (Enum("created" \| "needs_configuration" \| "configured" \| "connected" \| "error"))**
+
+## client.danglingIndices.deleteDanglingIndex [_dangling_indices.delete_dangling_index]
+Delete a dangling index.
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index)
+
+```ts
+client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
+```
+
+### Arguments [_arguments_dangling_indices.delete_dangling_index]
+
+#### Request (object) [_request_dangling_indices.delete_dangling_index]
+- **`index_uuid` (string)**: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
+- **`accept_data_loss` (boolean)**: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout
+
+## client.danglingIndices.importDanglingIndex [_dangling_indices.import_dangling_index]
+Import a dangling index.
+
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index)
+
+```ts
+client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss })
+```
+
+### Arguments [_arguments_dangling_indices.import_dangling_index]
+
+#### Request (object) [_request_dangling_indices.import_dangling_index]
+- **`index_uuid` (string)**: The UUID of the index to import. Use the get dangling indices API to locate the UUID.
+- **`accept_data_loss` (boolean)**: This parameter must be set to true to import a dangling index.
+Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master +- **`timeout` (Optional, string \| -1 \| 0)**: Explicit operation timeout + +## client.danglingIndices.listDanglingIndices [_dangling_indices.list_dangling_indices] +Get the dangling indices. + +If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. + +Use this API to list dangling indices, which you can then import or delete. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices) + +```ts +client.danglingIndices.listDanglingIndices() +``` + + +## client.enrich.deletePolicy [_enrich.delete_policy] +Delete an enrich policy. +Deletes an existing enrich policy and its enrich index. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy) + +```ts +client.enrich.deletePolicy({ name }) +``` + +### Arguments [_arguments_enrich.delete_policy] + +#### Request (object) [_request_enrich.delete_policy] +- **`name` (string)**: Enrich policy to delete. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.enrich.executePolicy [_enrich.execute_policy] +Run an enrich policy. +Create the enrich index for an existing enrich policy. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy) + +```ts +client.enrich.executePolicy({ name }) +``` + +### Arguments [_arguments_enrich.execute_policy] + +#### Request (object) [_request_enrich.execute_policy] +- **`name` (string)**: Enrich policy to execute. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks other enrich policy execution requests until complete. + +## client.enrich.getPolicy [_enrich.get_policy] +Get an enrich policy. +Returns information about an enrich policy. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy) + +```ts +client.enrich.getPolicy({ ... }) +``` + +### Arguments [_arguments_enrich.get_policy] + +#### Request (object) [_request_enrich.get_policy] +- **`name` (Optional, string \| string[])**: List of enrich policy names used to limit the request. +To return information for all enrich policies, omit this parameter. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.enrich.putPolicy [_enrich.put_policy] +Create an enrich policy. +Creates an enrich policy. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy) + +```ts +client.enrich.putPolicy({ name }) +``` + +### Arguments [_arguments_enrich.put_policy] + +#### Request (object) [_request_enrich.put_policy] +- **`name` (string)**: Name of the enrich policy to create or update. +- **`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `geo_shape` query. 
+- **`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `term` query. +- **`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.enrich.stats [_enrich.stats] +Get enrich stats. +Returns enrich coordinator statistics and information about enrich policies that are currently executing. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats) + +```ts +client.enrich.stats({ ... }) +``` + +### Arguments [_arguments_enrich.stats] + +#### Request (object) [_request_enrich.stats] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.eql.delete [_eql.delete] +Delete an async EQL search. +Delete an async EQL search or a stored synchronous EQL search. +The API also deletes results for the search. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete) + +```ts +client.eql.delete({ id }) +``` + +### Arguments [_arguments_eql.delete] + +#### Request (object) [_request_eql.delete] +- **`id` (string)**: Identifier for the search to delete. +A search ID is provided in the EQL search API's response for an async search. +A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. + +## client.eql.get [_eql.get] +Get async EQL search results. +Get the current status and available results for an async EQL search or a stored synchronous EQL search. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get) + +```ts +client.eql.get({ id }) +``` + +### Arguments [_arguments_eql.get] + +#### Request (object) [_request_eql.get] +- **`id` (string)**: Identifier for the search. +- **`keep_alive` (Optional, string \| -1 \| 0)**: Period for which the search and its results are stored on the cluster. +Defaults to the keep_alive value set by the search’s EQL search API request. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: Timeout duration to wait for the request to finish. +Defaults to no timeout, meaning the request waits for complete search results. + +## client.eql.getStatus [_eql.get_status] +Get the async EQL status. +Get the current status for an async EQL search or a stored synchronous EQL search without returning results. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status) + +```ts +client.eql.getStatus({ id }) +``` + +### Arguments [_arguments_eql.get_status] + +#### Request (object) [_request_eql.get_status] +- **`id` (string)**: Identifier for the search. + +## client.eql.search [_eql.search] +Get EQL search results. +Returns search results for an Event Query Language (EQL) query. +EQL assumes each document in a data stream or index corresponds to an event. 
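+
+For example, a minimal sketch of an event query (assuming a configured `client`; the index name and process name are placeholders):
+
+```ts
+// Sketch: find up to 10 events where a process named regsvr32.exe was observed
+const result = await client.eql.search({
+  index: 'my-data-stream',
+  query: 'process where process.name == "regsvr32.exe"',
+  size: 10
+})
+console.log(result.hits.events)
+```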
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search) + +```ts +client.eql.search({ index, query }) +``` + +### Arguments [_arguments_eql.search] + +#### Request (object) [_request_eql.search] +- **`index` (string \| string[])**: The name of the index to scope the operation +- **`query` (string)**: EQL query you wish to run. +- **`case_sensitive` (Optional, boolean)** +- **`event_category_field` (Optional, string)**: Field containing the event classification, such as process, file, or network. +- **`tiebreaker_field` (Optional, string)**: Field used to sort hits with the same timestamp in ascending order +- **`timestamp_field` (Optional, string)**: Field containing event timestamp. Default "@timestamp" +- **`fetch_size` (Optional, number)**: Maximum number of events to search at a time for sequence queries. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } \| { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query, written in Query DSL, used to filter the events on which the EQL query runs. +- **`keep_alive` (Optional, string \| -1 \| 0)** +- **`keep_on_completion` (Optional, boolean)** +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)** +- **`allow_partial_search_results` (Optional, boolean)**: Allow query execution also in case of shard failures. +If true, the query will keep running and will return results based on the available shards. +For sequences, the behavior can be further refined using allow_partial_sequence_results +- **`allow_partial_sequence_results` (Optional, boolean)**: This flag applies only to sequences and has effect only if allow_partial_search_results=true. +If true, the sequence query will return results based on the available shards, ignoring the others. +If false, the sequence query will return successfully, but will always have empty results. +- **`size` (Optional, number)**: For basic queries, the maximum number of matching events to return. Defaults to 10 +- **`fields` (Optional, { field, format, include_unmapped } \| { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. 
The response returns values for field names matching these patterns in the fields property of each hit.
+- **`result_position` (Optional, Enum("tail" \| "head"))**
+- **`runtime_mappings` (Optional, Record)**
+- **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size`
+parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the
+`max_samples_per_key` parameter. Pipes are not supported for sample queries.
+- **`allow_no_indices` (Optional, boolean)**: Whether to ignore the request if a wildcard indices expression resolves to no concrete indices. (This includes the `_all` string or when no indices have been specified.)
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expressions to concrete indices that are open, closed or both.
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: Indicates whether network round-trips should be minimized as part of cross-cluster search request execution.
+- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project
+metadata tags in a subset of Lucene query syntax.
+Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded).
+Examples:
+ _alias:my-project
+ _alias:_origin
+ _alias:*pr*
+Supported in serverless only.
+
+## client.esql.asyncQuery [_esql.async_query]
+Run an async ES|QL query.
+Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.
+
+The API accepts the same parameters and request body as the synchronous query API, along with additional async-related properties.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query)
+
+```ts
+client.esql.asyncQuery({ query })
+```
+
+### Arguments [_arguments_esql.async_query]
+
+#### Request (object) [_request_esql.async_query]
+- **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
+- **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results.
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
+- **`locale` (Optional, string)**
+- **`params` (Optional, number \| number \| string \| boolean \| null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
+- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object
+with information on how the query was executed. This information is for human debugging
+and its format can change at any time but it can give some insight into the performance
+of each part of the query.
+- **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table
+name and the next level key is the column name.
+- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters`
+object with information about the clusters that participated in the search along with info such as shards
+count.
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish.
+By default, the request waits for 1 second for the query results.
+If the query completes during this period, results are returned.
+Otherwise, a query ID is returned that can later be used to retrieve the results.
+- **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster.
+The default period is five days.
+When this period expires, the query and its results are deleted, even if the query is still ongoing.
+If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value.
+- **`keep_on_completion` (Optional, boolean)**: Indicates whether the query and its results are stored in the cluster.
+If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.
+- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards.
+If `false`, the query will fail if there are any failures.
+
+To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`.
+- **`delimiter` (Optional, string)**: The character to use between values within a CSV row.
+It is valid only for the CSV format.
+- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, e.g. json, yaml. + +`csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. + +For async requests, nothing will be returned if the async query doesn't finish within the timeout. +The query ID and running status are available in the `X-Elasticsearch-Async-Id` and `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively. + +## client.esql.asyncQueryDelete [_esql.async_query_delete] +Delete an async ES|QL query. +If the query is still running, it is cancelled. +Otherwise, the stored results are deleted. + +If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: + +* The authenticated user that submitted the original query request +* Users with the `cancel_task` cluster privilege + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete) + +```ts +client.esql.asyncQueryDelete({ id }) +``` + +### Arguments [_arguments_esql.async_query_delete] + +#### Request (object) [_request_esql.async_query_delete] +- **`id` (string)**: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. + +## client.esql.asyncQueryGet [_esql.async_query_get] +Get async ES|QL query results. +Get the current status and available results or stored results for an ES|QL asynchronous query. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get) + +```ts +client.esql.asyncQueryGet({ id }) +``` + +### Arguments [_arguments_esql.async_query_get] + +#### Request (object) [_request_esql.async_query_get] +- **`id` (string)**: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The period for which the query and its results are stored in the cluster. +When this period expires, the query and its results are deleted, even if the query is still ongoing. 
+- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the request to finish.
+By default, the request waits for complete query results.
+If the request completes during the period specified in this parameter, complete query results are returned.
+Otherwise, the response returns an `is_running` value of `true` and no results.
+
+## client.esql.asyncQueryStop [_esql.async_query_stop]
+Stop an async ES|QL query.
+
+This API interrupts the query execution and returns the results so far.
+If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop)
+
+```ts
+client.esql.asyncQueryStop({ id })
+```
+
+### Arguments [_arguments_esql.async_query_stop]
+
+#### Request (object) [_request_esql.async_query_stop]
+- **`id` (string)**: The unique identifier of the query.
+A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time.
+A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.
+- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
+If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
+
+## client.esql.getQuery [_esql.get_query]
+Get information about a specific running ES|QL query.
+Returns extended information about a running ES|QL query.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-get-query)
+
+```ts
+client.esql.getQuery({ id })
+```
+
+### Arguments [_arguments_esql.get_query]
+
+#### Request (object) [_request_esql.get_query]
+- **`id` (string)**: The query ID.
+
+## client.esql.listQueries [_esql.list_queries]
+Get information about running ES|QL queries.
+Returns an object containing IDs and other information about the running ES|QL queries.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-list-queries)
+
+```ts
+client.esql.listQueries()
+```
+
+
+## client.esql.query [_esql.query]
+Run an ES|QL query.
+Get search results for an ES|QL (Elasticsearch query language) query.
+
+[Endpoint documentation](https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest)
+
+```ts
+client.esql.query({ query })
+```
+
+### Arguments [_arguments_esql.query]
+
+#### Request (object) [_request_esql.query]
+- **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
+- **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results.
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +- **`locale` (Optional, string)** +- **`params` (Optional, number \| number \| string \| boolean \| null \| number \| number \| string \| boolean \| null[][])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object +with information on how the query was executed. This information is for human debugging +and its format can change at any time but it can give some insight into the performance +of each part of the query. +- **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table +name and the next level key is the column name. +- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` +object with information about the clusters that participated in the search along with info such as shards +count. +- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile" \| "arrow"))**: A short version of the Accept header, e.g. json, yaml. + +`csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. +- **`delimiter` (Optional, string)**: The character to use between values within a CSV row. Only valid for the CSV format. +- **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? +Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. +- **`allow_partial_results` (Optional, boolean)**: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. +If `false`, the query will fail if there are any failures. + +To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. + +## client.features.getFeatures [_features.get_features] +Get the features. +Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. +You can use this API to determine which feature states to include when taking a snapshot. +By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. + +A feature state includes one or more system indices necessary for a given feature to function. 
+In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. + +The features listed by this API are a combination of built-in features and features defined by plugins. +In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features) + +```ts +client.features.getFeatures({ ... }) +``` + +### Arguments [_arguments_features.get_features] + +#### Request (object) [_request_features.get_features] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.features.resetFeatures [_features.reset_features] +Reset the features. +Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. + +WARNING: Intended for development and testing use only. Do not reset features on a production cluster. + +Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. +This deletes all state information stored in system indices. + +The response code is HTTP 200 if the state is successfully reset for all features. +It is HTTP 500 if the reset operation failed for any feature. + +Note that select features might provide a way to reset particular system indices. +Using this API resets all features, both those that are built-in and implemented as plugins. + +To list the features that will be affected, use the get features API. + +IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features) + +```ts +client.features.resetFeatures({ ... }) +``` + +### Arguments [_arguments_features.reset_features] + +#### Request (object) [_request_features.reset_features] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. + +## client.fleet.globalCheckpoints [_fleet.global_checkpoints] +Get global checkpoints. + +Get the current global checkpoints for an index. +This API is designed for internal use by the Fleet server project. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet) + +```ts +client.fleet.globalCheckpoints({ index }) +``` + +### Arguments [_arguments_fleet.global_checkpoints] + +#### Request (object) [_request_fleet.global_checkpoints] +- **`index` (string \| string)**: A single index or index alias that resolves to a single index. +- **`wait_for_advance` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the global checkpoints +to advance past the provided `checkpoints`. +- **`wait_for_index` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the target index to exist +and all primary shards be active. Can only be true when `wait_for_advance` is true. +- **`checkpoints` (Optional, number[])**: A comma separated list of previous global checkpoints. 
When used in combination with `wait_for_advance`,
+the API will only return once the global checkpoints advance past the checkpoints. Providing an empty list
+will cause Elasticsearch to immediately return the current global checkpoints.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for the global checkpoints to advance past `checkpoints`.
+
+## client.fleet.msearch [_fleet.msearch]
+Run multiple Fleet searches.
+Run several Fleet searches with a single API request.
+The API follows the same structure as the multi search API.
+However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch)
+
+```ts
+client.fleet.msearch({ ... })
+```
+
+### Arguments [_arguments_fleet.msearch]
+
+#### Request (object) [_request_fleet.msearch]
+- **`index` (Optional, string \| string)**: A single target to search. If the target is an index alias, it must resolve to a single index.
+- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, project_routing, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } \| { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])**
+- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar.
+- **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response.
+- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute.
+- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node.
+- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint.
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. +- **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. +- **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. +- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard +after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause +Elasticsearch to immediately execute the search. +- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures. +If false, returns an error with no partial results. +Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. + +## client.fleet.search [_fleet.search] +Run a Fleet search. +The purpose of the Fleet search API is to provide an API where the search will be run only +after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search) + +```ts +client.fleet.search({ index }) +``` + +### Arguments [_arguments_fleet.search] + +#### Request (object) [_request_fleet.search] +- **`index` (string \| string)**: A single target to search. If the target is an index alias, it must resolve to a single index. +- **`aggregations` (Optional, Record)** +- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** +- **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. +- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. +- **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +- **`highlight` (Optional, { encoder, fields })** +- **`track_total_hits` (Optional, boolean \| number)**: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +- **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +- **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are +not included in search results and results collected by aggregations. 
+- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** +- **`profile` (Optional, boolean)** +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank, script } \| { window_size, query, learning_to_rank, script }[])** +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])** +- **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +- **`slice` (Optional, { field, id, max })** +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean \| { exclude_vectors, excludes, includes })**: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +- **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +- **`suggest` (Optional, { text })** +- **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +- **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +- **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. +- **`version` (Optional, boolean)**: If true, returns document version as part of a hit. 
+- **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification
+of each hit. See Optimistic concurrency control.
+- **`stored_fields` (Optional, string \| string[])**: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+- **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an `<index>` in the request path.
+- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+- **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
+- **`allow_no_indices` (Optional, boolean)**
+- **`analyzer` (Optional, string)**
+- **`analyze_wildcard` (Optional, boolean)**
+- **`batched_reduce_size` (Optional, number)**
+- **`ccs_minimize_roundtrips` (Optional, boolean)**
+- **`default_operator` (Optional, Enum("and" \| "or"))**
+- **`df` (Optional, string)**
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**
+- **`ignore_throttled` (Optional, boolean)**
+- **`ignore_unavailable` (Optional, boolean)**
+- **`lenient` (Optional, boolean)**
+- **`max_concurrent_shard_requests` (Optional, number)**
+- **`preference` (Optional, string)**
+- **`pre_filter_shard_size` (Optional, number)**
+- **`request_cache` (Optional, boolean)**
+- **`routing` (Optional, string)**
+- **`scroll` (Optional, string \| -1 \| 0)**
+- **`search_type` (Optional, Enum("query_then_fetch" \| "dfs_query_then_fetch"))**
+- **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions.
+- **`suggest_mode` (Optional, Enum("missing" \| "popular" \| "always"))**
+- **`suggest_size` (Optional, number)**
+- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned.
+- **`typed_keys` (Optional, boolean)**
+- **`rest_total_hits_as_int` (Optional, boolean)**
+- **`_source_excludes` (Optional, string \| string[])**
+- **`_source_includes` (Optional, string \| string[])**
+- **`q` (Optional, string)**
+- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
+after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
+Elasticsearch to immediately execute the search.
+- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures.
+If false, returns an error with no partial results.
+Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default.
+
+## client.graph.explore [_graph.explore]
+Explore graph analytics.
+Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
+The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
+An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
+Subsequent requests enable you to spider out from one or more vertices of interest.
+You can exclude vertices that have already been returned.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph)
+
+```ts
+client.graph.explore({ index })
+```
+
+### Arguments [_arguments_graph.explore]
+
+#### Request (object) [_request_graph.explore]
+- **`index` (string \| string[])**: Name of the index.
+- **`connections` (Optional, { connections, query, vertices })**: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
+- **`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })**: Directs the Graph API how to build the graph.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
+- **`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])**: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
+- **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the period of time to wait for a response from each shard.
+If no response is received before the timeout expires, the request fails and returns an error.
+Defaults to no timeout.
+
+## client.ilm.deleteLifecycle [_ilm.delete_lifecycle]
+Delete a lifecycle policy.
+You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle)
+
+```ts
+client.ilm.deleteLifecycle({ policy })
+```
+
+### Arguments [_arguments_ilm.delete_lifecycle]
+
+#### Request (object) [_request_ilm.delete_lifecycle]
+- **`policy` (string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ilm.explainLifecycle [_ilm.explain_lifecycle]
+Explain the lifecycle state.
+Get the current lifecycle status for one or more indices.
+For data streams, the API retrieves the current lifecycle status for the stream's backing indices.
+
+The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and includes information about any failures.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle)
+
+```ts
+client.ilm.explainLifecycle({ index })
+```
+
+### Arguments [_arguments_ilm.explain_lifecycle]
+
+#### Request (object) [_request_ilm.explain_lifecycle]
+- **`index` (string)**: List of data streams, indices, and aliases to target. Supports wildcards (`*`).
+To target all data streams and indices, use `*` or `_all`.
+- **`only_errors` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist.
+- **`only_managed` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ilm.getLifecycle [_ilm.get_lifecycle]
+Get lifecycle policies.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle)
+
+```ts
+client.ilm.getLifecycle({ ... })
+```
+
+### Arguments [_arguments_ilm.get_lifecycle]
+
+#### Request (object) [_request_ilm.get_lifecycle]
+- **`policy` (Optional, string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ilm.getStatus [_ilm.get_status]
+Get the ILM status.
+
+Get the current index lifecycle management status.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status)
+
+```ts
+client.ilm.getStatus()
+```
+
+
+## client.ilm.migrateToDataTiers [_ilm.migrate_to_data_tiers]
+Migrate to data tiers routing.
+Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers.
+Optionally, delete one legacy index template.
+Using node roles enables ILM to automatically move the indices between data tiers.
+
+Migrating away from custom node attributes routing can be manually performed.
+This API provides an automated way of performing three out of the four manual steps listed in the migration guide:
+
+1. Stop setting the custom hot attribute on new indices.
+1. Remove custom allocation settings from existing ILM policies.
+1. Replace custom allocation settings from existing indices with the corresponding tier preference.
+
+ILM must be stopped before performing the migration.
+Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers)
+
+```ts
+client.ilm.migrateToDataTiers({ ...
})
+```
+
+### Arguments [_arguments_ilm.migrate_to_data_tiers]
+
+#### Request (object) [_request_ilm.migrate_to_data_tiers]
+- **`legacy_template_to_delete` (Optional, string)**
+- **`node_attribute` (Optional, string)**
+- **`dry_run` (Optional, boolean)**: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration.
+This provides a way to retrieve the indices and ILM policies that need to be migrated.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+
+## client.ilm.moveToStep [_ilm.move_to_step]
+Move to a lifecycle step.
+Manually move an index into a specific step in the lifecycle policy and run that step.
+
+WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and should be considered an expert-level API.
+
+You must specify both the current step and the step to be executed in the body of the request.
+The request will fail if the current step does not match the step currently running for the index.
+This is to prevent the index from being moved from an unexpected step into the next step.
+
+When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional.
+If only the phase is specified, the index will move to the first step of the first action in the target phase.
+If the phase and action are specified, the index will move to the first step of the specified action in the specified phase.
+Only actions specified in the ILM policy are considered valid.
+An index cannot move to a step that is not part of its policy.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step)
+
+```ts
+client.ilm.moveToStep({ index, current_step, next_step })
+```
+
+### Arguments [_arguments_ilm.move_to_step]
+
+#### Request (object) [_request_ilm.move_to_step]
+- **`index` (string)**: The name of the index whose lifecycle step is to change.
+- **`current_step` ({ action, name, phase })**: The step that the index is expected to be in.
+- **`next_step` ({ action, name, phase })**: The step that you want to run.
+
+## client.ilm.putLifecycle [_ilm.put_lifecycle]
+Create or update a lifecycle policy.
+If the specified policy exists, it is replaced and the policy version is incremented.
+
+NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle)
+
+```ts
+client.ilm.putLifecycle({ policy })
+```
+
+### Arguments [_arguments_ilm.put_lifecycle]
+
+#### Request (object) [_request_ilm.put_lifecycle]
+- **`policy` (string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
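+
+As a rough sketch, creating or updating a policy and reading back its stored version might look like this (the `my-policy` name is illustrative, and the lifecycle phase definition that would normally accompany the request body is omitted):
+
+```ts
+// Create or update the policy; its version is incremented on each update.
+const put = await client.ilm.putLifecycle({
+  policy: 'my-policy', // hypothetical policy name
+  master_timeout: '30s', // fail if no master connection within 30 seconds
+})
+console.log(put.acknowledged)
+
+// Read it back; the response is keyed by policy name.
+const stored = await client.ilm.getLifecycle({ policy: 'my-policy' })
+console.log(stored['my-policy']?.version)
+```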
+
+## client.ilm.removePolicy [_ilm.remove_policy]
+Remove policies from an index.
+Remove the assigned lifecycle policies from an index or a data stream's backing indices.
+It also stops managing the indices.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy)
+
+```ts
+client.ilm.removePolicy({ index })
+```
+
+### Arguments [_arguments_ilm.remove_policy]
+
+#### Request (object) [_request_ilm.remove_policy]
+- **`index` (string)**: The name of the index from which to remove the policy.
+
+## client.ilm.retry [_ilm.retry]
+Retry a policy.
+Retry running the lifecycle policy for an index that is in the ERROR step.
+The API sets the policy back to the step where the error occurred and runs the step.
+Use the explain lifecycle state API to determine whether an index is in the ERROR step.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry)
+
+```ts
+client.ilm.retry({ index })
+```
+
+### Arguments [_arguments_ilm.retry]
+
+#### Request (object) [_request_ilm.retry]
+- **`index` (string)**: The name of the indices (comma-separated) whose failed lifecycle step is to be retried.
+
+## client.ilm.start [_ilm.start]
+Start the ILM plugin.
+Start the index lifecycle management plugin if it is currently stopped.
+ILM is started automatically when the cluster is formed.
+Restarting ILM is necessary only when it has been stopped using the stop ILM API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start)
+
+```ts
+client.ilm.start({ ... })
+```
+
+### Arguments [_arguments_ilm.start]
+
+#### Request (object) [_request_ilm.start]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ilm.stop [_ilm.stop]
+Stop the ILM plugin.
+Halt all lifecycle management operations and stop the index lifecycle management plugin.
+This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.
+
+The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped.
+Use the get ILM status API to check whether ILM is running.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop)
+
+```ts
+client.ilm.stop({ ... })
+```
+
+### Arguments [_arguments_ilm.stop]
+
+#### Request (object) [_request_ilm.stop]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.addBlock [_indices.add_block]
+Add an index block.
+
+Add an index block to an index.
+Index blocks limit the operations allowed on an index by blocking specific operation types.
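+
+For instance, a minimal sketch that makes an index read-only before an operation such as clone or shrink (the `my-index` name is illustrative):
+
+```ts
+// Add a "write" block so the index can no longer be modified.
+const response = await client.indices.addBlock({
+  index: 'my-index',
+  block: 'write', // one of: "metadata", "read", "read_only", "write"
+})
+console.log(response.acknowledged, response.shards_acknowledged)
+```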
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block)
+
+```ts
+client.indices.addBlock({ index, block })
+```
+
+### Arguments [_arguments_indices.add_block]
+
+#### Request (object) [_request_indices.add_block]
+- **`index` (string \| string[])**: A list or wildcard expression of index names used to limit the request.
+By default, you must explicitly name the indices you are adding blocks to.
+To allow adding blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API.
+- **`block` (Enum("metadata" \| "read" \| "read_only" \| "write"))**: The block type to add to the index.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+It supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+It can also be set to `-1` to indicate that the request should never timeout.
+
+## client.indices.analyze [_indices.analyze]
+Get tokens from text analysis.
+The analyze API performs analysis on a text string and returns the resulting tokens.
+
+Generating an excessive amount of tokens may cause a node to run out of memory.
+The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.
+If more than this limit of tokens gets generated, an error occurs.
+The `_analyze` endpoint without a specified index will always use `10000` as its limit.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze)
+
+```ts
+client.indices.analyze({ ... })
+```
+
+### Arguments [_arguments_indices.analyze]
+
+#### Request (object) [_request_indices.analyze]
+- **`index` (Optional, string)**: Index used to derive the analyzer.
+If specified, the `analyzer` or field parameter overrides this value.
+If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer.
+- **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`.
+This could be a built-in analyzer, or an analyzer that’s been configured in the index. +- **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter. +- **`char_filter` (Optional, string \| { type, escaped_tags } \| { type, mappings, mappings_path } \| { type, flags, pattern, replacement } \| { type, mode, name, unicode_set_filter } \| { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. +- **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details. +- **`field` (Optional, string)**: Field used to derive the analyzer. +To use this parameter, you must specify an index. +If specified, the `analyzer` parameter overrides this value. +- **`filter` (Optional, string \| { type } \| { type } \| { type } \| { type, preserve_original } \| { type } \| { type } \| { type, ignored_scripts, output_unigrams } \| { type } \| { type } \| { type, common_words, common_words_path, ignore_case, query_mode } \| { type, filter, script } \| { type } \| { type } \| { type, delimiter, encoding } \| { type } \| { type, max_gram, min_gram, side, preserve_original } \| { type, articles, articles_path, articles_case } \| { type, max_output_size, separator } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, dedup, dictionary, locale, longest_only } \| { type, hyphenation_patterns_path, no_sub_matches, no_overlapping_matches } \| { type } \| { type, mode, types } \| { type, keep_words, keep_words_case, keep_words_path } \| { type, ignore_case, keywords, keywords_path, keywords_pattern } \| { type } \| { type } \| { type, max, min } \| { type, consume_all_tokens, max_token_count } \| { type, language } \| { type, bucket_count, hash_count, hash_set_size, with_rotation } \| { type, filters, preserve_original } \| { type, max_gram, min_gram, preserve_original } \| { type, stoptags } \| { type, patterns, preserve_original } \| { type, all, flags, pattern, replacement } \| { type } \| { type } \| { type } \| { type, script } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type } \| { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } \| { type, language } \| { type } \| { type, rules, rules_path } \| { type, language } \| { type, ignore_case, remove_trailing, stopwords, stopwords_path } \| { type } \| { type } \| { type } \| { type, length } \| { type, only_on_same_position } \| { type } \| { type, adjust_offsets, ignore_keywords } \| { type } \| { type, stopwords } \| { type, minimum_length } \| { type, use_romaji } \| { type, stoptags } \| { type, alternate, caseFirst, caseLevel, country, decomposition, hiraganaQuaternaryMode, language, numeric, rules, strength, variableTop, variant } \| { type, unicode_set_filter } \| { type, name } \| { type, dir, id } \| { type, encoder, languageset, max_code_len, name_type, replace, rule_type } \| { type }[])**: Array of token filters used to apply after the tokenizer. +- **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. +- **`text` (Optional, string \| string[])**: Text to analyze. +If an array of strings is provided, it is analyzed as a multi-value field. 
+- **`tokenizer` (Optional, string \| { type, tokenize_on_chars, max_token_length } \| { type, max_token_length } \| { type, custom_token_chars, max_gram, min_gram, token_chars } \| { type, buffer_size } \| { type } \| { type } \| { type, custom_token_chars, max_gram, min_gram, token_chars } \| { type, buffer_size, delimiter, replacement, reverse, skip } \| { type, flags, group, pattern } \| { type, pattern } \| { type, pattern } \| { type, max_token_length } \| { type } \| { type, max_token_length } \| { type, max_token_length } \| { type, rule_files } \| { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } \| { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })**: Tokenizer to use to convert text into tokens. + +## client.indices.cancelMigrateReindex [_indices.cancel_migrate_reindex] +Cancel a migration reindex operation. + +Cancel a migration reindex attempt for a data stream or index. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex) + +```ts +client.indices.cancelMigrateReindex({ index }) +``` + +### Arguments [_arguments_indices.cancel_migrate_reindex] + +#### Request (object) [_request_indices.cancel_migrate_reindex] +- **`index` (string \| string[])**: The index or data stream name + +## client.indices.clearCache [_indices.clear_cache] +Clear the cache. +Clear the cache of one or more indices. +For data streams, the API clears the caches of the stream's backing indices. + +By default, the clear cache API clears all caches. +To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. +To clear the cache only of specific fields, use the `fields` parameter. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache) + +```ts +client.indices.clearCache({ ... }) +``` + +### Arguments [_arguments_indices.clear_cache] + +#### Request (object) [_request_indices.clear_cache] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`fielddata` (Optional, boolean)**: If `true`, clears the fields cache. +Use the `fields` parameter to clear the cache of specific fields only. +- **`fields` (Optional, string \| string[])**: List of field names used to limit the `fielddata` parameter. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`query` (Optional, boolean)**: If `true`, clears the query cache. +- **`request` (Optional, boolean)**: If `true`, clears the request cache. + +## client.indices.clone [_indices.clone] +Clone an index. 
+Clone an existing index into a new index.
+Each original primary shard is cloned into a new primary shard in the new index.
+
+IMPORTANT: Elasticsearch does not apply index templates to the resulting index.
+The API also does not copy index metadata from the original index.
+Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.
+For example, if you clone a CCR follower index, the resulting clone will not be a follower index.
+
+The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.
+To set the number of replicas in the resulting index, configure these settings in the clone request.
+
+Cloning works as follows:
+
+* First, it creates a new target index with the same definition as the source index.
+* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
+* Finally, it recovers the target index as though it were a closed index which had just been re-opened.
+
+IMPORTANT: Indices can only be cloned if they meet the following requirements:
+
+* The index must be marked as read-only and have a cluster health status of green.
+* The target index must not exist.
+* The source index must have the same number of primary shards as the target index.
+* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.
+
+The current write index on a data stream cannot be cloned.
+In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.
+
+NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index.
+
+**Monitor the cloning process**
+
+The cloning process can be monitored with the cat recovery API, or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.
+
+The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.
+At this point, all shards are in the state unassigned.
+If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.
+
+Once the primary shard is allocated, it moves to state initializing, and the clone process begins.
+When the clone operation completes, the shard will become active.
+At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.
+
+**Wait for active shards**
+
+Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone)
+
+```ts
+client.indices.clone({ index, target })
+```
+
+### Arguments [_arguments_indices.clone]
+
+#### Request (object) [_request_indices.clone]
+- **`index` (string)**: Name of the source index to clone.
+- **`target` (string)**: Name of the target index to create.
+- **`aliases` (Optional, Record)**: Aliases for the resulting index.
+- **`settings` (Optional, Record)**: Configuration options for the target index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
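+
+As a quick illustration, here is a minimal sketch; the node URL and index names are placeholders for this example rather than values from this reference:
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+// Placeholder connection details; adjust for your cluster.
+const client = new Client({ node: 'http://localhost:9200' })
+
+// Clone a read-only source index into a new target index,
+// overriding the replica count on the clone.
+const response = await client.indices.clone({
+  index: 'my-source-index',
+  target: 'my-cloned-index',
+  settings: { number_of_replicas: 0 },
+  wait_for_active_shards: 'all'
+})
+console.log(response.acknowledged)
+```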
+
+## client.indices.close [_indices.close]
+Close an index.
+A closed index is blocked for read or write operations and does not allow all operations that opened indices allow.
+It is not possible to index documents or to search for documents in a closed index.
+Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.
+
+When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index.
+The shards will then go through the normal recovery process.
+The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.
+
+You can open and close multiple indices.
+An error is thrown if the request explicitly refers to a missing index.
+This behavior can be turned off using the `ignore_unavailable=true` parameter.
+
+By default, you must explicitly name the indices you are opening or closing.
+To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API.
+
+Closed indices consume a significant amount of disk space, which can cause problems in managed environments.
+Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close)
+
+```ts
+client.indices.close({ index })
+```
+
+### Arguments [_arguments_indices.close]
+
+#### Request (object) [_request_indices.close]
+- **`index` (string \| string[])**: List or wildcard expression of index names used to limit the request.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+
+## client.indices.create [_indices.create]
+Create an index.
+You can use the create index API to add a new index to an Elasticsearch cluster.
+When creating an index, you can specify the following:
+
+* Settings for the index.
+* Mappings for fields in the index.
+* Index aliases
+
+**Wait for active shards**
+
+By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.
+The index creation response will indicate what happened.
+For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.
+Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.
+These values simply indicate whether the operation completed before the timeout.
+If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.
+If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).
+
+You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.
+Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create)
+
+```ts
+client.indices.create({ index })
+```
+
+### Arguments [_arguments_indices.create]
+
+#### Request (object) [_request_indices.create]
+- **`index` (string)**: Name of the index you wish to create.
+Index names must meet the following criteria:
+
+* Lowercase only
+* Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#`
+* Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions
+* Cannot start with `-`, `_`, or `+`
+* Cannot be `.` or `..`
+* Cannot be longer than 255 bytes (note that it is bytes, so multi-byte characters will reach the limit faster)
+* Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins
+- **`aliases` (Optional, Record)**: Aliases for the index.
+- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index.
If specified, this mapping can include:
+- Field names
+- Field data types
+- Mapping parameters
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+
+## client.indices.createDataStream [_indices.create_data_stream]
+Create a data stream.
+
+You must have a matching index template with data stream enabled.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream)
+
+```ts
+client.indices.createDataStream({ name })
+```
+
+### Arguments [_arguments_indices.create_data_stream]
+
+#### Request (object) [_request_indices.create_data_stream]
+- **`name` (string)**: Name of the data stream, which must meet the following criteria:
+
+* Lowercase only
+* Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character
+* Cannot start with `-`, `_`, `+`, or `.ds-`
+* Cannot be `.` or `..`
+* Cannot be longer than 255 bytes (multi-byte characters count towards this limit faster)
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.createFrom [_indices.create_from]
+Create an index from a source index.
+
+Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.
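+
+For example, a minimal sketch (hypothetical index names, assuming a configured `client` as in the clone sketch above):
+
+```ts
+// Create `new-index` from `old-index`, overriding a destination setting.
+const response = await client.indices.createFrom({
+  source: 'old-index',
+  dest: 'new-index',
+  create_from: {
+    settings_override: { number_of_replicas: 0 }
+  }
+})
+```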
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from) + +```ts +client.indices.createFrom({ source, dest }) +``` + +### Arguments [_arguments_indices.create_from] + +#### Request (object) [_request_indices.create_from] +- **`source` (string)**: The source index or data stream name +- **`dest` (string)**: The destination index or data stream name +- **`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })** + +## client.indices.dataStreamsStats [_indices.data_streams_stats] +Get data stream stats. + +Get statistics for one or more data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1) + +```ts +client.indices.dataStreamsStats({ ... }) +``` + +### Arguments [_arguments_indices.data_streams_stats] + +#### Request (object) [_request_indices.data_streams_stats] +- **`name` (Optional, string \| string[])**: List of data streams used to limit the request. +Wildcard expressions (`*`) are supported. +To target all data streams in a cluster, omit this parameter or use `*`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. + +## client.indices.delete [_indices.delete] +Delete indices. +Deleting an index deletes its documents, shards, and metadata. +It does not delete related Kibana components, such as data views, visualizations, or dashboards. + +You cannot delete the current write index of a data stream. +To delete the index, you must roll over the data stream so a new write index is created. +You can then use the delete index API to delete the previous write index. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete) + +```ts +client.indices.delete({ index }) +``` + +### Arguments [_arguments_indices.delete] + +#### Request (object) [_request_indices.delete] +- **`index` (string \| string[])**: List of indices to delete. +You cannot specify index aliases. +By default, this parameter does not support wildcards (`*`) or `_all`. +To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. 
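+
+For instance, a minimal sketch (hypothetical index name, assuming a configured `client`):
+
+```ts
+// Delete a single concrete index; wildcards are disabled by default.
+const response = await client.indices.delete({
+  index: 'my-old-index',
+  ignore_unavailable: true
+})
+```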
+
+## client.indices.deleteAlias [_indices.delete_alias]
+Delete an alias.
+Removes a data stream or index from an alias.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias)
+
+```ts
+client.indices.deleteAlias({ index, name })
+```
+
+### Arguments [_arguments_indices.delete_alias]
+
+#### Request (object) [_request_indices.delete_alias]
+- **`index` (string \| string[])**: List of data streams or indices used to limit the request.
+Supports wildcards (`*`).
+- **`name` (string \| string[])**: List of aliases to remove.
+Supports wildcards (`*`). To remove all aliases, use `*` or `_all`.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.deleteDataLifecycle [_indices.delete_data_lifecycle]
+Delete data stream lifecycles.
+Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle)
+
+```ts
+client.indices.deleteDataLifecycle({ name })
+```
+
+### Arguments [_arguments_indices.delete_data_lifecycle]
+
+#### Request (object) [_request_indices.delete_data_lifecycle]
+- **`name` (string \| string[])**: A list of data streams from which the data stream lifecycle will be deleted; use `*` to target all data streams
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit timeout for the operation
+
+## client.indices.deleteDataStream [_indices.delete_data_stream]
+Delete data streams.
+Deletes one or more data streams and their backing indices.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream)
+
+```ts
+client.indices.deleteDataStream({ name })
+```
+
+### Arguments [_arguments_indices.delete_data_stream]
+
+#### Request (object) [_request_indices.delete_data_stream]
+- **`name` (string \| string[])**: List of data streams to delete. Wildcard (`*`) expressions are supported.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
+
+## client.indices.deleteDataStreamOptions [_indices.delete_data_stream_options]
+Delete data stream options.
+Removes the data stream options from a data stream.
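+
+A minimal sketch (hypothetical data stream name, assuming a configured `client`):
+
+```ts
+// Remove the options configuration from a single data stream.
+await client.indices.deleteDataStreamOptions({ name: 'my-data-stream' })
+```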
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream-options)
+
+```ts
+client.indices.deleteDataStreamOptions({ name })
+```
+
+### Arguments [_arguments_indices.delete_data_stream_options]
+
+#### Request (object) [_request_indices.delete_data_stream_options]
+- **`name` (string \| string[])**: A list of data streams from which the data stream options will be deleted; use `*` to target all data streams
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string \| -1 \| 0)**: Explicit timeout for the operation
+
+## client.indices.deleteIndexTemplate [_indices.delete_index_template]
+Delete an index template.
+The provided name may contain multiple template names separated by commas. If multiple template
+names are specified, wildcards are not supported and the provided names must match
+existing templates exactly.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template)
+
+```ts
+client.indices.deleteIndexTemplate({ name })
+```
+
+### Arguments [_arguments_indices.delete_index_template]
+
+#### Request (object) [_request_indices.delete_index_template]
+- **`name` (string \| string[])**: List of index template names used to limit the request. Wildcard (*) expressions are supported.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.deleteTemplate [_indices.delete_template]
+Delete a legacy index template.
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template)
+
+```ts
+client.indices.deleteTemplate({ name })
+```
+
+### Arguments [_arguments_indices.delete_template]
+
+#### Request (object) [_request_indices.delete_template]
+- **`name` (string)**: The name of the legacy index template to delete.
+Wildcard (`*`) expressions are supported.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.diskUsage [_indices.disk_usage]
+Analyze the index disk usage.
+Analyze the disk usage of each field of an index or data stream.
+This API might not support indices created in previous Elasticsearch versions.
+The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.
+
+NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API.
+Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate.
+The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated.
+
+For usage examples, refer to [Analyze the index disk usage example](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/index-disk-usage).
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage)
+
+```ts
+client.indices.diskUsage({ index })
+```
+
+### Arguments [_arguments_indices.disk_usage]
+
+#### Request (object) [_request_indices.disk_usage]
+- **`index` (string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+It’s recommended to execute this API with a single index (or the latest backing index of a data stream) because the API consumes significant resources.
+- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`flush` (Optional, boolean)**: If `true`, the API performs a flush before analysis.
+If `false`, the response may not include uncommitted data.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response.
+- **`run_expensive_tasks` (Optional, boolean)**: Analyzing field disk usage is resource-intensive.
+To use the API, this parameter must be set to `true`.
+
+## client.indices.downsample [_indices.downsample]
+Downsample an index.
+Downsamples a time series (TSDS) index and reduces its size by keeping the last value or by pre-aggregating metrics:
+
+- When running in `aggregate` mode, it pre-calculates and stores statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`)
+for each metric field grouped by a configured time interval and their dimensions.
+- When running in `last_value` mode, it keeps the last value for each metric in the configured interval and their dimensions.
+
+For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index.
+All documents within an hour interval are summarized and stored as a single document in the downsample index.
+
+NOTE: Only indices in a time series data stream are supported.
+Neither field nor document level security can be defined on the source index.
+The source index must be read-only (`index.blocks.write: true`).
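+
+A minimal sketch (hypothetical index names and interval, assuming a configured `client`):
+
+```ts
+// Downsample a read-only TSDS backing index into hourly rollups.
+const response = await client.indices.downsample({
+  index: '.ds-my-tsds-2099.03.07-000001',
+  target_index: 'my-tsds-downsampled-1h',
+  config: { fixed_interval: '1h' }
+})
+```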
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample) + +```ts +client.indices.downsample({ index, target_index }) +``` + +### Arguments [_arguments_indices.downsample] + +#### Request (object) [_request_indices.downsample] +- **`index` (string)**: Name of the time series index to downsample. +- **`target_index` (string)**: Name of the index to create. +- **`config` (Optional, { fixed_interval, sampling_method })** + +## client.indices.exists [_indices.exists] +Check indices. +Check if one or more indices, index aliases, or data streams exist. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists) + +```ts +client.indices.exists({ index }) +``` + +### Arguments [_arguments_indices.exists] + +#### Request (object) [_request_indices.exists] +- **`index` (string \| string[])**: List of data streams, indices, and aliases. Supports wildcards (`*`). +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. + +## client.indices.existsAlias [_indices.exists_alias] +Check aliases. + +Check if one or more data stream or index aliases exist. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias) + +```ts +client.indices.existsAlias({ name }) +``` + +### Arguments [_arguments_indices.exists_alias] + +#### Request (object) [_request_indices.exists_alias] +- **`name` (string \| string[])**: List of aliases to check. Supports wildcards (`*`). +- **`index` (Optional, string \| string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. 
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.existsIndexTemplate [_indices.exists_index_template]
+Check index templates.
+
+Check whether index templates exist.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template)
+
+```ts
+client.indices.existsIndexTemplate({ name })
+```
+
+### Arguments [_arguments_indices.exists_index_template]
+
+#### Request (object) [_request_indices.exists_index_template]
+- **`name` (string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported.
+- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
+- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.existsTemplate [_indices.exists_template]
+Check existence of index templates.
+Get information about whether index templates exist.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template)
+
+```ts
+client.indices.existsTemplate({ name })
+```
+
+### Arguments [_arguments_indices.exists_template]
+
+#### Request (object) [_request_indices.exists_template]
+- **`name` (string \| string[])**: A list of index template names used to limit the request.
+Wildcard (`*`) expressions are supported.
+- **`flat_settings` (Optional, boolean)**: Indicates whether to use a flat format for the response.
+- **`local` (Optional, boolean)**: Indicates whether to get information from the local node only.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+
+## client.indices.explainDataLifecycle [_indices.explain_data_lifecycle]
+Get the status for a data stream lifecycle.
+Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
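+
+A minimal sketch (hypothetical backing index name, assuming a configured `client`):
+
+```ts
+// Inspect the lifecycle status of a backing index, including default settings.
+const response = await client.indices.explainDataLifecycle({
+  index: '.ds-my-data-stream-2099.03.07-000001',
+  include_defaults: true
+})
+```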
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle) + +```ts +client.indices.explainDataLifecycle({ index }) +``` + +### Arguments [_arguments_indices.explain_data_lifecycle] + +#### Request (object) [_request_indices.explain_data_lifecycle] +- **`index` (string \| string[])**: The name of the index to explain +- **`include_defaults` (Optional, boolean)**: indicates if the API should return the default values the system uses for the index's lifecycle +- **`master_timeout` (Optional, string \| -1 \| 0)**: Specify timeout for connection to master + +## client.indices.fieldUsageStats [_indices.field_usage_stats] +Get field usage stats. +Get field usage information for each shard and field of an index. +Field usage statistics are automatically captured when queries are running on a cluster. +A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. + +The response body reports the per-shard usage count of the data structures that back the fields in the index. +A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats) + +```ts +client.indices.fieldUsageStats({ index }) +``` + +### Arguments [_arguments_indices.field_usage_stats] + +#### Request (object) [_request_indices.field_usage_stats] +- **`index` (string \| string[])**: List or wildcard expression of index names used to limit the request. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics. + +## client.indices.flush [_indices.flush] +Flush data streams or indices. +Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. +Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. + +After each operation has been flushed it is permanently stored in the Lucene index. +This may mean that there is no need to maintain an additional copy of it in the transaction log. 
+The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.
+
+It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly.
+If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush)
+
+```ts
+client.indices.flush({ ... })
+```
+
+### Arguments [_arguments_indices.flush]
+
+#### Request (object) [_request_indices.flush]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases to flush.
+Supports wildcards (`*`).
+To flush all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`force` (Optional, boolean)**: If `true`, the request forces a flush even if there are no changes to commit to the index.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`wait_if_ongoing` (Optional, boolean)**: If `true` and another flush operation is running, the flush operation blocks until that flush completes and then executes.
+If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running.
+
+## client.indices.forcemerge [_indices.forcemerge]
+Force a merge.
+Perform the force merge operation on the shards of one or more indices.
+For data streams, the API forces a merge on the shards of the stream's backing indices.
+
+Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.
+Merging normally happens automatically, but sometimes it is useful to trigger a merge manually.
+
+WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).
+When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone".
+These soft-deleted documents are automatically cleaned up during regular segment merges.
+But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.
+So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.
+If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.
+
+**Blocks during a force merge**
+
+Calls to this API block until the merge is complete (unless the request contains `wait_for_completion=false`).
+If the client connection is lost before completion then the force merge process will continue in the background.
+Any new requests to force merge the same indices will also block until the ongoing force merge is complete.
+
+**Running force merge asynchronously**
+
+If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.
+However, you cannot cancel this task as the force merge task is not cancelable.
+Elasticsearch creates a record of this task as a document at `_tasks/`.
+When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.
+
+**Force merging multiple indices**
+
+You can force merge multiple indices with a single request by targeting:
+
+* One or more data streams that contain multiple backing indices
+* Multiple indices
+* One or more aliases
+* All data streams and indices in a cluster
+
+Each targeted shard is force-merged separately using the `force_merge` threadpool.
+By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.
+If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel.
+
+Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.
+
+**Data streams and time-based indices**
+
+Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.
+In these cases, each index only receives indexing traffic for a certain period of time.
+Once an index receives no more writes, its shards can be force-merged to a single segment.
+This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.
+For example:
+
+```
+POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+```
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge)
+
+```ts
+client.indices.forcemerge({ ... })
+```
+
+### Arguments [_arguments_indices.forcemerge]
+
+#### Request (object) [_request_indices.forcemerge]
+- **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices
+- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+- **`flush` (Optional, boolean)**: Specify whether the index should be flushed after performing the operation (default: true)
+- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+- **`max_num_segments` (Optional, number)**: The number of segments the index should be merged into (default: dynamic)
+- **`only_expunge_deletes` (Optional, boolean)**: Specify whether the operation should only expunge deleted documents
+- **`wait_for_completion` (Optional, boolean)**: Whether the request should wait until the force merge is completed.
+
+## client.indices.get [_indices.get]
+Get index information.
+Get information about one or more indices. For data streams, the API returns information about the
+stream’s backing indices.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get)
+
+```ts
+client.indices.get({ index })
+```
+
+### Arguments [_arguments_indices.get]
+
+#### Request (object) [_request_indices.get]
+- **`index` (string \| string[])**: List of data streams, indices, and index aliases used to limit the request.
+Wildcard expressions (`*`) are supported.
+- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only
+missing or closed indices. This behavior applies even if the request targets other open indices. For example,
+a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument
+determines whether wildcard expressions match hidden data streams. Supports a list of values,
+such as `open,hidden`.
+- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format.
+- **`ignore_unavailable` (Optional, boolean)**: If false, requests that target a missing index return an error.
+- **`include_defaults` (Optional, boolean)**: If true, return all default settings in the response.
+- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`features` (Optional, { name, description } \| { name, description }[])**: Return only information on specified index features
+
+## client.indices.getAlias [_indices.get_alias]
+Get aliases.
+Retrieves information for one or more data stream or index aliases.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias)
+
+```ts
+client.indices.getAlias({ ... })
+```
+
+### Arguments [_arguments_indices.get_alias]
+
+#### Request (object) [_request_indices.get_alias]
+- **`name` (Optional, string \| string[])**: List of aliases to retrieve.
+Supports wildcards (`*`).
+To retrieve all aliases, omit this parameter or use `*` or `_all`.
+- **`index` (Optional, string \| string[])**: List of data streams or indices used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.getDataLifecycle [_indices.get_data_lifecycle] +Get data stream lifecycles. + +Get the data stream lifecycle configuration of one or more data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle) + +```ts +client.indices.getDataLifecycle({ name }) +``` + +### Arguments [_arguments_indices.get_data_lifecycle] + +#### Request (object) [_request_indices.get_data_lifecycle] +- **`name` (string \| string[])**: List of data streams to limit the request. +Supports wildcards (`*`). +To target all data streams, omit this parameter or use `*` or `_all`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.getDataLifecycleStats [_indices.get_data_lifecycle_stats] +Get data stream lifecycle stats. +Get statistics about the data streams that are managed by a data stream lifecycle. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats) + +```ts +client.indices.getDataLifecycleStats() +``` + + +## client.indices.getDataStream [_indices.get_data_stream] +Get data streams. + +Get information about one or more data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream) + +```ts +client.indices.getDataStream({ ... }) +``` + +### Arguments [_arguments_indices.get_data_stream] + +#### Request (object) [_request_indices.get_data_stream] +- **`name` (Optional, string \| string[])**: List of data stream names used to limit the request. +Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. 
+- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. + +## client.indices.getDataStreamMappings [_indices.get_data_stream_mappings] +Get data stream mappings. + +Get mapping information for one or more data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-mappings) + +```ts +client.indices.getDataStreamMappings({ name }) +``` + +### Arguments [_arguments_indices.get_data_stream_mappings] + +#### Request (object) [_request_indices.get_data_stream_mappings] +- **`name` (string \| string[])**: A list of data streams or data stream patterns. Supports wildcards (`*`). +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. + +## client.indices.getDataStreamOptions [_indices.get_data_stream_options] +Get data stream options. + +Get the data stream options configuration of one or more data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-options) + +```ts +client.indices.getDataStreamOptions({ name }) +``` + +### Arguments [_arguments_indices.get_data_stream_options] + +#### Request (object) [_request_indices.get_data_stream_options] +- **`name` (string \| string[])**: List of data streams to limit the request. +Supports wildcards (`*`). +To target all data streams, omit this parameter or use `*` or `_all`. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.getDataStreamSettings [_indices.get_data_stream_settings] +Get data stream settings. + +Get setting information for one or more data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings) + +```ts +client.indices.getDataStreamSettings({ name }) +``` + +### Arguments [_arguments_indices.get_data_stream_settings] + +#### Request (object) [_request_indices.get_data_stream_settings] +- **`name` (string \| string[])**: A list of data streams or data stream patterns. Supports wildcards (`*`). +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. + +## client.indices.getFieldMapping [_indices.get_field_mapping] +Get mapping definitions. +Retrieves mapping definitions for one or more fields. +For data streams, the API retrieves field mappings for the stream’s backing indices. + +This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. 
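+
+A minimal sketch (hypothetical index pattern and field names, assuming a configured `client`):
+
+```ts
+// Fetch mappings for two fields across all matching indices.
+const response = await client.indices.getFieldMapping({
+  index: 'my-index-*',
+  fields: ['@timestamp', 'message']
+})
+```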
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) + +```ts +client.indices.getFieldMapping({ fields }) +``` + +### Arguments [_arguments_indices.get_field_mapping] + +#### Request (object) [_request_indices.get_field_mapping] +- **`fields` (string \| string[])**: List or wildcard expression of fields used to limit returned information. +Supports wildcards (`*`). +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. + +## client.indices.getIndexTemplate [_indices.get_index_template] +Get index templates. +Get information about one or more index templates. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template) + +```ts +client.indices.getIndexTemplate({ ... }) +``` + +### Arguments [_arguments_indices.get_index_template] + +#### Request (object) [_request_indices.get_index_template] +- **`name` (Optional, string)**: Name of index template to retrieve. Wildcard (*) expressions are supported. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. + +## client.indices.getMapping [_indices.get_mapping] +Get mapping definitions. +For data streams, the API retrieves mappings for the stream’s backing indices. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) + +```ts +client.indices.getMapping({ ... }) +``` + +### Arguments [_arguments_indices.get_mapping] + +#### Request (object) [_request_indices.get_mapping] +- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.getMigrateReindexStatus [_indices.get_migrate_reindex_status]
+Get the migration reindexing status.
+
+Get the status of a migration reindex attempt for a data stream or index.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration)
+
+```ts
+client.indices.getMigrateReindexStatus({ index })
+```
+
+### Arguments [_arguments_indices.get_migrate_reindex_status]
+
+#### Request (object) [_request_indices.get_migrate_reindex_status]
+- **`index` (string \| string[])**: The index or data stream name.
+
+## client.indices.getSettings [_indices.get_settings]
+Get index settings.
+Get setting information for one or more indices.
+For data streams, it returns setting information for the stream's backing indices.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings)
+
+```ts
+client.indices.getSettings({ ... })
+```
+
+### Arguments [_arguments_indices.get_settings]
+
+#### Request (object) [_request_indices.get_settings]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit
+the request. Supports wildcards (`*`). To target all data streams and
+indices, omit this parameter or use `*` or `_all`.
+- **`name` (Optional, string \| string[])**: List or wildcard expression of settings to retrieve.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index
+alias, or `_all` value targets only missing or closed indices. This
+behavior applies even if the request targets other open indices. For
+example, a request targeting `foo*,bar*` returns an error if an index
+starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response.
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If
+`false`, information is retrieved from the master node.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+
+## client.indices.getTemplate [_indices.get_template]
+Get legacy index templates.
+Get information about one or more index templates.
+
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template)
+
+```ts
+client.indices.getTemplate({ ... })
+```
+
+### Arguments [_arguments_indices.get_template]
+
+#### Request (object) [_request_indices.get_template]
+- **`name` (Optional, string \| string[])**: List of index template names used to limit the request.
+Wildcard (`*`) expressions are supported.
+To return all index templates, omit this parameter or use a value of `_all` or `*`.
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.migrateReindex [_indices.migrate_reindex]
+Reindex legacy backing indices.
+
+Reindex all legacy backing indices for a data stream.
+This operation occurs in a persistent task.
+The persistent task ID is returned immediately and the reindexing work is completed in that task.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex)
+
+```ts
+client.indices.migrateReindex({ ... })
+```
+
+### Arguments [_arguments_indices.migrate_reindex]
+
+#### Request (object) [_request_indices.migrate_reindex]
+- **`reindex` (Optional, { mode, source })**
+
+## client.indices.migrateToDataStream [_indices.migrate_to_data_stream]
+Convert an index alias to a data stream.
+You must have a matching index template that is data stream enabled.
+The alias must meet the following criteria:
+
+* The alias must have a write index.
+* All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type.
+* The alias must not have any filters.
+* The alias must not use custom routing.
+
+If successful, the request removes the alias and creates a data stream with the same name.
+The indices for the alias become hidden backing indices for the stream.
+The write index for the alias becomes the write index for the stream.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream)
+
+```ts
+client.indices.migrateToDataStream({ name })
+```
+
+### Arguments [_arguments_indices.migrate_to_data_stream]
+
+#### Request (object) [_request_indices.migrate_to_data_stream]
+- **`name` (string)**: Name of the index alias to convert to a data stream.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
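+
+A minimal usage sketch (the alias name `logs-app` is hypothetical):
+
+```ts
+// Convert the existing alias `logs-app` into a data stream of the same name.
+// The alias must satisfy the criteria listed above, and a matching
+// data-stream-enabled index template must already exist.
+await client.indices.migrateToDataStream({ name: 'logs-app' })
+```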
+
+## client.indices.modifyDataStream [_indices.modify_data_stream]
+Update data streams.
+Performs one or more data stream modification actions in a single atomic operation.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream)
+
+```ts
+client.indices.modifyDataStream({ actions })
+```
+
+### Arguments [_arguments_indices.modify_data_stream]
+
+#### Request (object) [_request_indices.modify_data_stream]
+- **`actions` ({ add_backing_index, remove_backing_index }[])**: Actions to perform.
+
+## client.indices.open [_indices.open]
+Open a closed index.
+For data streams, the API opens any closed backing indices.
+
+A closed index is blocked for read/write operations and does not allow all operations that opened indices allow.
+It is not possible to index documents or to search for documents in a closed index.
+Because closed indices do not have to maintain internal data structures for indexing or searching documents, they impose a smaller overhead on the cluster.
+
+When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index.
+The shards will then go through the normal recovery process.
+The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.
+
+You can open and close multiple indices.
+An error is thrown if the request explicitly refers to a missing index.
+This behavior can be turned off by using the `ignore_unavailable=true` parameter.
+
+By default, you must explicitly name the indices you are opening or closing.
+To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+This setting can also be changed with the cluster update settings API.
+
+Closed indices consume a significant amount of disk space, which can cause problems in managed environments.
+Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
+
+Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open)
+
+```ts
+client.indices.open({ index })
+```
+
+### Arguments [_arguments_indices.open]
+
+#### Request (object) [_request_indices.open]
+- **`index` (string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+By default, you must explicitly name the indices you are using to limit the request.
+To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +## client.indices.promoteDataStream [_indices.promote_data_stream] +Promote a data stream. +Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. + +With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. +These data streams can't be rolled over in the local cluster. +These replicated data streams roll over only if the upstream data stream rolls over. +In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + +NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. +If this is missing, the data stream will not be able to roll over until a matching index template is created. +This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream) + +```ts +client.indices.promoteDataStream({ name }) +``` + +### Arguments [_arguments_indices.promote_data_stream] + +#### Request (object) [_request_indices.promote_data_stream] +- **`name` (string)**: The name of the data stream +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.putAlias [_indices.put_alias] +Create or update an alias. +Adds a data stream or index to an alias. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias) + +```ts +client.indices.putAlias({ index, name }) +``` + +### Arguments [_arguments_indices.put_alias] + +#### Request (object) [_request_indices.put_alias] +- **`index` (string \| string[])**: List of data streams or indices to add. +Supports wildcards (`*`). +Wildcard patterns that match both data streams and indices return an error. +- **`name` (string)**: Alias to update. +If the alias doesn’t exist, the request creates it. +Index alias names support date math. 
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query used to limit documents the alias can access.
+- **`index_routing` (Optional, string)**: Value used to route indexing operations to a specific shard.
+If specified, this overwrites the `routing` value for indexing operations.
+Data stream aliases don’t support this parameter.
+- **`is_write_index` (Optional, boolean)**: If `true`, sets the write index or data stream for the alias.
+If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests.
+If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index.
+Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream.
+- **`routing` (Optional, string)**: Value used to route indexing and search operations to a specific shard.
+Data stream aliases don’t support this parameter.
+- **`search_routing` (Optional, string)**: Value used to route search operations to a specific shard.
+If specified, this overwrites the `routing` value for search operations.
+Data stream aliases don’t support this parameter.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.putDataLifecycle [_indices.put_data_lifecycle]
+Update data stream lifecycles.
+Update the data stream lifecycle of the specified data streams.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle)
+
+```ts
+client.indices.putDataLifecycle({ name })
+```
+
+### Arguments [_arguments_indices.put_data_lifecycle]
+
+#### Request (object) [_request_indices.put_data_lifecycle]
+- **`name` (string \| string[])**: List of data streams used to limit the request.
+Supports wildcards (`*`).
+To target all data streams, use `*` or `_all`.
+- **`data_retention` (Optional, string \| -1 \| 0)**: If defined, every document added to this data stream will be stored for at least this time frame.
+Any time after this duration, the document could be deleted.
+When empty, every document in this data stream will be stored indefinitely.
+- **`downsampling` (Optional, { after, fixed_interval }[])**: The downsampling configuration to execute for the managed backing index after rollover.
+- **`enabled` (Optional, boolean)**: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
+that's disabled (enabled: `false`) will have no effect on the data stream.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match.
+Supports a list of values, such as `open,hidden`.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.putDataStreamMappings [_indices.put_data_stream_mappings]
+Update data stream mappings.
+
+This API can be used to override mappings on specific data streams. These overrides will take precedence over what
+is specified in the template that the data stream matches. The mapping change is only applied to new write indices
+that are created during rollover after this API is called. No indices are changed by this API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-mappings)
+
+```ts
+client.indices.putDataStreamMappings({ name })
+```
+
+### Arguments [_arguments_indices.put_data_stream_mappings]
+
+#### Request (object) [_request_indices.put_data_stream_mappings]
+- **`name` (string \| string[])**: A list of data streams or data stream patterns.
+- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**
+- **`dry_run` (Optional, boolean)**: If `true`, the request does not actually change the mappings on any data streams. Instead, it
+simulates changing the mappings and reports back to the user what would have happened had these mappings
+actually been applied.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the
+timeout expires, the request fails and returns an error.
+
+## client.indices.putDataStreamOptions [_indices.put_data_stream_options]
+Update data stream options.
+Update the data stream options of the specified data streams.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-options)
+
+```ts
+client.indices.putDataStreamOptions({ name })
+```
+
+### Arguments [_arguments_indices.put_data_stream_options]
+
+#### Request (object) [_request_indices.put_data_stream_options]
+- **`name` (string \| string[])**: List of data streams used to limit the request.
+Supports wildcards (`*`).
+To target all data streams, use `*` or `_all`.
+- **`failure_store` (Optional, { enabled, lifecycle })**: If defined, it will update the failure store configuration of every data stream resolved by the name expression.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match.
+Supports a list of values, such as `open,hidden`.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.putDataStreamSettings [_indices.put_data_stream_settings]
+Update data stream settings.
+
+This API can be used to override settings on specific data streams. These overrides will take precedence over what
+is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state,
+only certain settings are allowed. If possible, the setting change is applied to all
+backing indices. Otherwise, it will be applied when the data stream is next rolled over.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings)
+
+```ts
+client.indices.putDataStreamSettings({ name })
+```
+
+### Arguments [_arguments_indices.put_data_stream_settings]
+
+#### Request (object) [_request_indices.put_data_stream_settings]
+- **`name` (string \| string[])**: A list of data streams or data stream patterns.
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**
+- **`dry_run` (Optional, boolean)**: If `true`, the request does not actually change the settings on any data streams or indices. Instead, it
+simulates changing the settings and reports back to the user what would have happened had these settings
+actually been applied.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the
+timeout expires, the request fails and returns an error.
+
+## client.indices.putIndexTemplate [_indices.put_index_template]
+Create or update an index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+
+Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.
+Index templates are applied during data stream or index creation.
+For data streams, these settings and mappings are applied when the stream's backing indices are created.
+Settings and mappings specified in a create index API request override any settings or mappings specified in an index template.
+Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.
+
+You can use C-style `/* *\/` block comments in index templates.
+You can include comments anywhere in the request body, except before the opening curly bracket.
+
+**Multiple matching templates**
+
+If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.
+
+Multiple templates with overlapping index patterns at the same priority are not allowed; attempting to create a template that matches an existing index template at an identical priority results in an error.
+
+**Composing aliases, mappings, and settings**
+
+When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.
+Any mappings, settings, or aliases from the parent index template are merged in next.
+Finally, any configuration on the index request itself is merged.
+Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.
+If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.
+This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.
+If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.
+If an entry already exists with the same key, then it is overwritten by the new definition.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template)
+
+```ts
+client.indices.putIndexTemplate({ name })
+```
+
+### Arguments [_arguments_indices.put_index_template]
+
+#### Request (object) [_request_indices.put_index_template]
+- **`name` (string)**: Index or template name.
+- **`index_patterns` (Optional, string \| string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
+- **`composed_of` (Optional, string[])**: An ordered list of component template names.
+Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
+- **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied.
+It may optionally include an `aliases`, `mappings`, or `settings` configuration.
+- **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices.
+Supports an empty object.
+Data streams require a matching index template with a `data_stream` object.
+- **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created.
+The index template with the highest priority is chosen.
+If no priority is specified, the template is treated as though it is of priority 0 (lowest priority).
+This number is not automatically generated by Elasticsearch.
+- **`version` (Optional, number)**: Version number used to manage index templates externally.
+This number is not automatically generated by Elasticsearch.
+External systems can use these version numbers to simplify template management.
+To unset a version, replace the template without specifying one.
+- **`_meta` (Optional, Record)**: Optional user metadata about the index template.
+It may have any contents.
+It is not automatically generated or used by Elasticsearch.
+This user-defined object is stored in the cluster state, so keeping it short is preferable.
+To unset the metadata, replace the template without specifying it.
+- **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting.
+If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`.
+If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
+- **`ignore_missing_component_templates` (Optional, string[])**: The configuration option `ignore_missing_component_templates` can be used when an index template
+references a component template that might not exist.
+- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template
+that uses deprecated components, Elasticsearch will emit a deprecation warning.
+- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`cause` (Optional, string)**: User-defined reason for creating or updating the index template.
+
+## client.indices.putMapping [_indices.put_mapping]
+Update field mappings.
+Add new fields to an existing data stream or index.
+You can use the update mapping API to:
+
+- Add a new field to an existing index
+- Update mappings for multiple indices in a single request
+- Add new properties to an object field
+- Enable multi-fields for an existing field
+- Update supported mapping parameters
+- Change a field's mapping using reindexing
+- Rename a field using a field alias
+
+Learn how to use the update mapping API with practical examples in the [Update mapping API examples](https://www.elastic.co/docs/manage-data/data-store/mapping/update-mappings-examples) guide.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping)
+
+```ts
+client.indices.putMapping({ index })
+```
+
+### Arguments [_arguments_indices.put_mapping]
+
+#### Request (object) [_request_indices.put_mapping]
+- **`index` (string \| string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices.
+- **`date_detection` (Optional, boolean)**: Controls whether dynamic date detection is enabled.
+- **`dynamic` (Optional, Enum("strict" \| "runtime" \| true \| false))**: Controls whether new fields are added dynamically.
+- **`dynamic_date_formats` (Optional, string[])**: If date detection is enabled, new string fields are checked
+against `dynamic_date_formats`; if a value matches, a
+`date` field is added instead of a string field.
+- **`dynamic_templates` (Optional, Record[])**: Specify dynamic templates for the mapping.
+- **`_field_names` (Optional, { enabled })**: Control whether field names are enabled for the index.
+- **`_meta` (Optional, Record)**: A mapping type can have custom metadata associated with it. These are
+not used at all by Elasticsearch, but can be used to store
+application-specific metadata.
+- **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields.
+- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include:
+
+- Field name
+- Field data type
+- Mapping parameters
+- **`_routing` (Optional, { required })**: Enable making a routing value required on indexed documents.
+- **`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })**: Control whether the `_source` field is enabled on the index.
+- **`runtime` (Optional, Record)**: Mapping of runtime fields for the index.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target.
+
+## client.indices.putSettings [_indices.put_settings]
+Update index settings.
+Changes dynamic index settings in real time.
+For data streams, index setting changes are applied to all backing indices by default.
+
+To revert a setting to the default value, use a null value.
+The list of per-index settings that can be updated dynamically on live indices can be found in the index settings documentation.
+To prevent existing settings from being updated, set the `preserve_existing` parameter to `true`.
+
+For performance optimization during bulk indexing, you can disable the refresh interval.
+Refer to [disable refresh interval](https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval) for an example.
+There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:
+
+```
+{
+  "number_of_replicas": 1
+}
+```
+
+Or you can use an `index` setting object:
+```
+{
+  "index": {
+    "number_of_replicas": 1
+  }
+}
+```
+
+Or you can use dot notation:
+```
+{
+  "index.number_of_replicas": 1
+}
+```
+
+Or you can embed any of the aforementioned options in a `settings` object. For example:
+
+```
+{
+  "settings": {
+    "index": {
+      "number_of_replicas": 1
+    }
+  }
+}
+```
+
+NOTE: You can only define new analyzers on closed indices.
+To add an analyzer, you must close the index, define the analyzer, and reopen the index.
+You cannot close the write index of a data stream.
+To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.
+Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.
+This affects searches and any new data added to the stream after the rollover.
+However, it does not affect the data stream's backing indices or their existing data.
+To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
+Refer to [updating analyzers on existing indices](https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices) for step-by-step examples.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings)
+
+```ts
+client.indices.putSettings({ ... })
+```
+
+### Arguments [_arguments_indices.put_settings]
+
+#### Request (object) [_request_indices.put_settings]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit
+the request. Supports wildcards (`*`). To target all data streams and
+indices, omit this parameter or use `*` or `_all`.
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index
+alias, or `_all` value targets only missing or closed indices. This
+behavior applies even if the request targets other open indices. For
+example, a request targeting `foo*,bar*` returns an error if an index
+starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target
+data streams, this argument determines whether wildcard expressions match
+hidden data streams. Supports a list of values, such as
+`open,hidden`.
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`preserve_existing` (Optional, boolean)**: If `true`, existing index settings remain unchanged.
+- **`reopen` (Optional, boolean)**: Whether to close and reopen the index to apply non-dynamic settings.
+If set to `true`, the indices to which the settings are being applied
+will be closed temporarily and then reopened in order to apply the changes.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the
+timeout expires, the request fails and returns an error.
+
+## client.indices.putTemplate [_indices.put_template]
+Create or update a legacy index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+Elasticsearch applies templates to new indices based on an index pattern that matches the index name.
+
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
+
+Composable templates always take precedence over legacy templates.
+If no composable template matches a new index, matching legacy templates are applied according to their order.
+
+Index templates are only applied during index creation.
+Changes to index templates do not affect existing indices.
+Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.
+
+You can use C-style `/* *\/` block comments in index templates.
+You can include comments anywhere in the request body, except before the opening curly bracket.
+
+**Indices matching multiple templates**
+
+Multiple index templates can potentially match an index; in this case, both the settings and mappings are merged into the final configuration of the index.
+The order of the merging can be controlled using the `order` parameter, with lower `order` values being applied first, and higher values overriding them.
+NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template)
+
+```ts
+client.indices.putTemplate({ name })
+```
+
+### Arguments [_arguments_indices.put_template]
+
+#### Request (object) [_request_indices.put_template]
+- **`name` (string)**: The name of the template.
+- **`aliases` (Optional, Record)**: Aliases for the index.
+- **`index_patterns` (Optional, string \| string[])**: Array of wildcard expressions used to match the names
+of indices during creation.
+- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index.
+- **`order` (Optional, number)**: Order in which Elasticsearch applies this template if the index
+matches multiple templates.
+
+Templates with lower `order` values are merged first. Templates with higher
+`order` values are merged later, overriding templates with lower values.
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index.
+- **`version` (Optional, number)**: Version number used to manage index templates externally. This number
+is not automatically generated by Elasticsearch.
+To unset a version, replace the template without specifying one.
+- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an error.
+- **`cause` (Optional, string)**: User-defined reason for creating or updating the index template.
+
+## client.indices.recovery [_indices.recovery]
+Get index recovery information.
+Get information about ongoing and completed shard recoveries for one or more indices.
+For data streams, the API returns information for the stream's backing indices.
+
+All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.
+
+Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.
+When a shard recovery completes, the recovered shard is available for search and indexing.
+
+Recovery automatically occurs during the following processes:
+
+* When creating an index for the first time.
+* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.
+* Creation of new replica shard copies from the primary.
+* Relocation of a shard copy to a different node in the same cluster.
+* A snapshot restore operation.
+* A clone, shrink, or split operation.
+
+You can determine the cause of a shard recovery using the recovery or cat recovery APIs.
+
+The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.
+It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.
+This means that if a shard copy completes a recovery and Elasticsearch then relocates it onto a different node, the information about the original recovery will not be shown in the recovery API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery)
+
+```ts
+client.indices.recovery({ ... })
+```
+
+### Arguments [_arguments_indices.recovery]
+
+#### Request (object) [_request_indices.recovery]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries.
+- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+
+## client.indices.refresh [_indices.refresh]
+Refresh an index.
+A refresh makes recent operations performed on one or more indices available for search.
+For data streams, the API runs the refresh operation on the stream’s backing indices.
+
+By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.
+You can change this default interval with the `index.refresh_interval` setting.
+
+In Elastic Cloud Serverless, the default refresh interval is 5 seconds across all indices.
+
+Refresh requests are synchronous and do not return a response until the refresh operation completes.
+
+Refreshes are resource-intensive.
+To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.
+
+If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.
+This option ensures the indexing operation waits for a periodic refresh before running the search.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh)
+
+```ts
+client.indices.refresh({ ... })
+```
+
+### Arguments [_arguments_indices.refresh]
+
+#### Request (object) [_request_indices.refresh]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+
+## client.indices.reloadSearchAnalyzers [_indices.reload_search_analyzers]
+Reload search analyzers.
+Reload an index's search analyzers and their resources.
+For data streams, the API reloads search analyzers and resources for the stream's backing indices.
+
+IMPORTANT: After reloading the search analyzers, you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.
+
+You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer.
+To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers.
+
+NOTE: This API does not perform a reload for each shard of an index.
+Instead, it performs a reload for each node containing index shards.
+As a result, the total shard count returned by the API can differ from the number of index shards.
+Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster (including nodes that don't contain a shard replica) before using this API.
+This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers)
+
+```ts
+client.indices.reloadSearchAnalyzers({ index })
+```
+
+### Arguments [_arguments_indices.reload_search_analyzers]
+
+#### Request (object) [_request_indices.reload_search_analyzers]
+- **`index` (string \| string[])**: A list of index names to reload analyzers for.
+- **`allow_no_indices` (Optional, boolean)**: Whether to ignore the request if a wildcard indices expression resolves to no concrete indices. (This includes the `_all` string and requests with no indices specified.)
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expressions to concrete indices that are open, closed, or both.
+- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed).
+- **`resource` (Optional, string)**: The changed resource to reload analyzers from, if applicable.
+
+## client.indices.removeBlock [_indices.remove_block]
+Remove an index block.
+
+Remove an index block from an index.
+Index blocks limit the operations allowed on an index by blocking specific operation types.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block)
+
+```ts
+client.indices.removeBlock({ index, block })
+```
+
+### Arguments [_arguments_indices.remove_block]
+
+#### Request (object) [_request_indices.remove_block]
+- **`index` (string \| string[])**: A list or wildcard expression of index names used to limit the request.
+By default, you must explicitly name the indices you are removing blocks from.
+To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API.
+- **`block` (Enum("metadata" \| "read" \| "read_only" \| "write"))**: The block type to remove from the index.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+It supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never time out.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+It can also be set to `-1` to indicate that the request should never time out.
+
+## client.indices.resolveCluster [_indices.resolve_cluster]
+Resolve the cluster.
+
+Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included.
+If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.
+
+This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.
+
+You use the same index expression with this endpoint as you would for cross-cluster search.
+Index and cluster exclusions are also supported with this endpoint.
+
+For each cluster in the index expression, information is returned about:
+
+* Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint.
+* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.
+* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
+* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
+* Cluster version information, including the Elasticsearch server version.
+
+For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.
+Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.
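+
+A rough client-side equivalent of that example, as a hedged sketch (the index pattern and cluster alias are hypothetical):
+
+```ts
+// Ask the local cluster which remote clusters aliased `cluster*` are connected
+// and whether they have indices, aliases, or data streams matching `my-index-*`.
+const clusters = await client.indices.resolveCluster({
+  name: 'my-index-*,cluster*:my-index-*'
+})
+// The response is keyed by cluster alias ('(local)' for the querying cluster).
+for (const [cluster, info] of Object.entries(clusters)) {
+  console.log(cluster, info.connected, info.matching_indices)
+}
+```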
+
+## Note on backwards compatibility
+The ability to query without an index expression was added in version 8.18, so when
+querying remote clusters older than that, the local cluster will send the index
+expression `dummy*` to those remote clusters. Thus, if an error occurs, you may see a reference
+to that index expression even though you didn't request it. If it causes a problem, you can
+instead include an index expression like `*:*` to bypass the issue.
+
+## Advantages of using this endpoint before a cross-cluster search
+
+You may want to exclude a cluster or index from a search when:
+
+* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.
+* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search.
+* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)
+* A remote cluster is an older version that does not support the feature you want to use in your search.
+
+## Test availability of remote clusters
+
+The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.
+The remote cluster may be available, while the local cluster is not currently connected to it.
+
+You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters.
+For example, with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.
+The `connected` field in the response will indicate whether it was successful.
+If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster)
+
+```ts
+client.indices.resolveCluster({ ... })
+```
+
+### Arguments [_arguments_indices.resolve_cluster]
+
+#### Request (object) [_request_indices.resolve_cluster]
+- **`name` (Optional, string \| string[])**: A list of names or index patterns for the indices, aliases, and data streams to resolve.
+Resources on remote clusters can be specified using the `<cluster>:<name>` syntax.
+Index and cluster exclusions (e.g., `-cluster1:*`) are also supported.
+If no index expression is specified, information about all remote clusters configured on the local cluster
+is returned without doing any index matching.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing
+or closed indices. This behavior applies even if the request targets other open indices. For example, a request
+targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`ignore_unavailable` (Optional, boolean)**: If false, the request returns an error if it targets a missing or closed index.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum time to wait for remote clusters to respond.
+If a remote cluster does not respond within this timeout period, the API response
+will show the cluster as not connected and include an error message that the
+request timed out.
+
+The default timeout is unset and the query can take
+as long as the networking layer is configured to wait for remote clusters that are
+not responding (typically 30 seconds).
+
+## client.indices.resolveIndex [_indices.resolve_index]
+Resolve indices.
+Resolve the names and/or index patterns for indices, aliases, and data streams.
+Multiple patterns and remote clusters are supported.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index)
+
+```ts
+client.indices.resolveIndex({ name })
+```
+
+### Arguments [_arguments_indices.resolve_index]
+
+#### Request (object) [_request_indices.resolve_index]
+- **`name` (string \| string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve.
+Resources on remote clusters can be specified using the `<cluster>:<name>` syntax.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+- **`mode` (Optional, Enum("standard" \| "time_series" \| "logsdb" \| "lookup") \| Enum("standard" \| "time_series" \| "logsdb" \| "lookup")[])**: Filter indices by index mode, such as `standard`, `time_series`, `logsdb`, or `lookup`. Accepts a list of index modes. If empty, no filter is applied.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target using project
+metadata tags in a subset of Lucene query syntax.
+Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded).
+Examples:
+  `_alias:my-project`
+  `_alias:_origin`
+  `_alias:*pr*`
+Supported in serverless only.
+
+## client.indices.rollover [_indices.rollover]
+Roll over to a new index.
+TIP: We recommend using the index lifecycle rollover action to automate rollovers. However, Serverless does not support Index Lifecycle Management (ILM), so don't use this approach in the Serverless context.
+
+The rollover API creates a new index for a data stream or index alias.
+The API behavior depends on the rollover target.
+
+**Roll over a data stream**
+
+If you roll over a data stream, the API creates a new write index for the stream.
+The stream's previous write index becomes a regular backing index.
+A rollover also increments the data stream's generation.
+
+**Roll over an index alias with a write index**
+
+TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.
+Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.
+
+If an index alias points to multiple indices, one of the indices must be a write index.
+The rollover API creates a new write index for the alias with `is_write_index` set to `true`.
+The API also sets `is_write_index` to `false` for the previous write index.
+
+**Roll over an index alias with one index**
+
+If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.
+
+NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.
+
+**Increment index names for an alias**
+
+When you roll over an index alias, you can specify a name for the new index.
+If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.
+For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+This number is always six characters and zero-padded, regardless of the previous index's name.
+
+If you use an index alias for time series data, you can use date math in the index name to track the rollover date.
+For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.
+If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.
+If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover)
+
+```ts
+client.indices.rollover({ alias })
+```
+
+### Arguments [_arguments_indices.rollover]
+
+#### Request (object) [_request_indices.rollover]
+- **`alias` (string)**: Name of the data stream or index alias to roll over.
+- **`new_index` (Optional, string)**: Name of the index to create.
+Supports date math.
+Data streams do not support this parameter.
+- **`aliases` (Optional, Record)**: Aliases for the target index.
+Data streams do not support this parameter.
+- **`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })**: Conditions for the rollover.
+If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions.
+If this parameter is not specified, Elasticsearch performs the rollover unconditionally.
+If conditions are specified, at least one of them must be a `max_*` condition.
+The index will roll over if any `max_*` condition is satisfied and all `min_*` conditions are satisfied.
+- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index.
+If specified, this mapping can include field names, field data types, and mapping parameters.
+- **`settings` (Optional, Record)**: Configuration options for the index.
+Data streams do not support this parameter.
+- **`dry_run` (Optional, boolean)**: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+- **`lazy` (Optional, boolean)**: If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write.
+Only allowed on data streams.
+
+## client.indices.segments [_indices.segments]
+Get index segments.
+Get low-level information about the Lucene segments in index shards.
+For data streams, the API returns information about the stream's backing indices.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments)
+
+```ts
+client.indices.segments({ ... })
+```
+
+### Arguments [_arguments_indices.segments]
+
+#### Request (object) [_request_indices.segments]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+To target all data streams and indices, omit this parameter or use `*` or `_all`.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+
+## client.indices.shardStores [_indices.shard_stores]
+Get index shard stores.
+Get store information about replica shards in one or more indices.
+For data streams, the API retrieves store information for the stream's backing indices.
+
+The index shard stores API returns the following information:
+
+* The node on which each replica shard exists.
+* The allocation ID for each replica shard.
+* A unique ID for each replica shard.
+* Any errors encountered while opening the shard index or from an earlier failure.
+
+By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores)
+
+```ts
+client.indices.shardStores({ ... })
+```
+
+### Arguments [_arguments_indices.shard_stores]
+
+#### Request (object) [_request_indices.shard_stores]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases used to limit the request.
+- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all`
+value targets only missing or closed indices. This behavior applies even if the request
+targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams,
+this argument determines whether wildcard expressions match hidden data streams.
+- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response.
+- **`status` (Optional, Enum("green" \| "yellow" \| "red" \| "all") \| Enum("green" \| "yellow" \| "red" \| "all")[])**: List of shard health statuses used to limit the request.
+
+## client.indices.shrink [_indices.shrink]
+Shrink an index.
+Shrink an index into a new index with fewer primary shards.
+
+Before you can shrink an index:
+
+* The index must be read-only.
+* A copy of every shard in the index must reside on the same node.
+* The index must have a green health status.
+
+To make shard allocation easier, we recommend you also remove the index's replica shards.
+You can later re-add replica shards as part of the shrink operation.
+
+The requested number of primary shards in the target index must be a factor of the number of shards in the source index.
+For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with 15 primary shards can be shrunk into 5, 3, or 1.
+If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard.
+Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.
+
+The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.
+
+A shrink operation:
+
+* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
+* Hard-links segments from the source index into the target index.
If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also, if you use multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
+* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `index.routing.allocation.initial_recovery._id` index setting.
+
+IMPORTANT: Indices can only be shrunk if they satisfy the following requirements:
+
+* The target index must not exist.
+* The source index must have more primary shards than the target index.
+* The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
+* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.
+* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink)
+
+```ts
+client.indices.shrink({ index, target })
+```
+
+### Arguments [_arguments_indices.shrink]
+
+#### Request (object) [_request_indices.shrink]
+- **`index` (string)**: Name of the source index to shrink.
+- **`target` (string)**: Name of the target index to create.
+- **`aliases` (Optional, Record)**: The key is the alias name.
+Index alias names support date math.
+- **`settings` (Optional, Record)**: Configuration options for the target index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+
+## client.indices.simulateIndexTemplate [_indices.simulate_index_template]
+Simulate an index.
+Get the index configuration that would be applied to the specified index from an existing index template.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template)
+
+```ts
+client.indices.simulateIndexTemplate({ name })
+```
+
+### Arguments [_arguments_indices.simulate_index_template]
+
+#### Request (object) [_request_indices.simulate_index_template]
+- **`name` (string)**: Name of the index to simulate.
+- **`index_template` (Optional, { index_patterns, composed_of, template, version, priority, _meta, allow_auto_create, data_stream, deprecated, ignore_missing_component_templates, created_date, created_date_millis, modified_date, modified_date_millis })**
+- **`create` (Optional, boolean)**: Whether the index template optionally defined in the body should only be dry-run added if it is new, or whether it can also replace an existing template with the same name.
+- **`cause` (Optional, string)**: User-defined reason for dry-run creating the new template for simulation purposes.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template.
+
+## client.indices.simulateTemplate [_indices.simulate_template]
+Simulate an index template.
+Get the index configuration that would be applied by a particular index template.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template)
+
+```ts
+client.indices.simulateTemplate({ ... })
+```
+
+### Arguments [_arguments_indices.simulate_template]
+
+#### Request (object) [_request_indices.simulate_template]
+- **`name` (Optional, string)**: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit
+this parameter and specify the template configuration in the request body.
+- **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting.
+If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`.
+If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
+- **`index_patterns` (Optional, string \| string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
+- **`composed_of` (Optional, string[])**: An ordered list of component template names.
+Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
+- **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied.
+It may optionally include an `aliases`, `mappings`, or `settings` configuration.
+- **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices.
+Supports an empty object.
+Data streams require a matching index template with a `data_stream` object.
+- **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created.
+The index template with the highest priority is chosen.
+If no priority is specified, the template is treated as though it is of priority 0 (lowest priority).
+This number is not automatically generated by Elasticsearch.
+- **`version` (Optional, number)**: Version number used to manage index templates externally.
+This number is not automatically generated by Elasticsearch.
+- **`_meta` (Optional, Record)**: Optional user metadata about the index template.
+May have any contents.
+This map is not automatically generated by Elasticsearch.
+- **`ignore_missing_component_templates` (Optional, string[])**: The configuration option `ignore_missing_component_templates` can be used when an index template
+references a component template that might not exist.
+- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template
+that uses deprecated components, Elasticsearch will emit a deprecation warning.
+- **`create` (Optional, boolean)**: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation.
+- **`cause` (Optional, string)**: User-defined reason for dry-run creating the new template for simulation purposes.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template.
+
+## client.indices.split [_indices.split]
+Split an index.
+Split an index into a new index with more primary shards.
+
+Before you can split an index:
+
+* The index must be read-only.
+* The cluster health status must be green.
+
+You can make an index read-only with the following request using the add index block API:
+
+```
+PUT /my_source_index/_block/write
+```
+
+The current write index on a data stream cannot be split.
+In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.
+
+The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.
+The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.
+For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.
+
+A split operation:
+
+* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
+* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
+* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.
+* Recovers the target index as though it were a closed index which had just been re-opened.
+
+IMPORTANT: Indices can only be split if they satisfy the following requirements:
+
+* The target index must not exist.
+* The source index must have fewer primary shards than the target index.
+* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
+* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split)
+
+```ts
+client.indices.split({ index, target })
+```
+
+### Arguments [_arguments_indices.split]
+
+#### Request (object) [_request_indices.split]
+- **`index` (string)**: Name of the source index to split.
+- **`target` (string)**: Name of the target index to create.
+- **`aliases` (Optional, Record)**: Aliases for the resulting index.
+- **`settings` (Optional, Record)**: Configuration options for the target index.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number \| Enum("all" \| "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+
+## client.indices.stats [_indices.stats]
+Get index statistics.
+For data streams, the API retrieves statistics for the stream's backing indices.
+
+By default, the returned statistics are index-level with `primaries` and `total` aggregations.
+`primaries` are the values for only the primary shards.
+`total` are the accumulated values for both primary and replica shards.
+
+To get shard-level statistics, set the `level` parameter to `shards`.
+
+NOTE: When moving to another node, the shard-level statistics for a shard are cleared.
+Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats)
+
+```ts
+client.indices.stats({ ... })
+```
+
+### Arguments [_arguments_indices.stats]
+
+#### Request (object) [_request_indices.stats]
+- **`metric` (Optional, Enum("_all" \| "store" \| "indexing" \| "get" \| "search" \| "merge" \| "flush" \| "refresh" \| "query_cache" \| "fielddata" \| "docs" \| "warmer" \| "completion" \| "segments" \| "translog" \| "request_cache" \| "recovery" \| "bulk" \| "shard_stats" \| "mappings" \| "dense_vector" \| "sparse_vector") \| Enum("_all" \| "store" \| "indexing" \| "get" \| "search" \| "merge" \| "flush" \| "refresh" \| "query_cache" \| "fielddata" \| "docs" \| "warmer" \| "completion" \| "segments" \| "translog" \| "request_cache" \| "recovery" \| "bulk" \| "shard_stats" \| "mappings" \| "dense_vector" \| "sparse_vector")[])**: Limit the information returned to the specific metrics.
+- **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices.
+- **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument
+determines whether wildcard expressions match hidden data streams.
Supports a list of values,
+such as `open,hidden`.
+- **`fielddata_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata statistics.
+- **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics.
+- **`forbid_closed_indices` (Optional, boolean)**: If true, statistics are not collected from closed indices.
+- **`groups` (Optional, string \| string[])**: List of search groups to include in the search statistics.
+- **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested).
+- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory.
+- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the cluster, indices, or shards level.
+
+## client.indices.updateAliases [_indices.update_aliases]
+Create or update an alias.
+Adds a data stream or index to an alias.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases)
+
+```ts
+client.indices.updateAliases({ ... })
+```
+
+### Arguments [_arguments_indices.update_aliases]
+
+#### Request (object) [_request_indices.update_aliases]
+- **`actions` (Optional, { add_backing_index, remove_backing_index }[])**: Actions to perform.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.indices.validateQuery [_indices.validate_query]
+Validate a query.
+Validates a query without running it.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query)
+
+```ts
+client.indices.validateQuery({ ... })
+```
+
+### Arguments [_arguments_indices.validate_query]
+
+#### Request (object) [_request_indices.validate_query]
+- **`index` (Optional, string \| string[])**: List of data streams, indices, and aliases to search.
+Supports wildcards (`*`).
+To search all data streams or indices, omit this parameter or use `*` or `_all`.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The query to validate, defined in the Query DSL.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`all_shards` (Optional, boolean)**: If `true`, the validation is executed on all shards instead of one random shard per index.
+- **`analyzer` (Optional, string)**: Analyzer to use for the query string.
+This parameter can only be used when the `q` query string parameter is specified.
+- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed.
+- **`default_operator` (Optional, Enum("and" \| "or"))**: The default operator for query string query: `and` or `or`.
+- **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string.
+This parameter can only be used when the `q` query string parameter is specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+- **`explain` (Optional, boolean)**: If `true`, the response returns detailed information if an error has occurred.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
+- **`rewrite` (Optional, boolean)**: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed.
+- **`q` (Optional, string)**: Query in the Lucene query string syntax.
+
+## client.inference.chatCompletionUnified [_inference.chat_completion_unified]
+Perform chat completion inference on the service.
+
+The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
+It only works with the `chat_completion` task type for `openai` and `elastic` inference services.
+
+NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming.
+The Chat completion inference API and the Stream inference API differ in their response structure and capabilities.
+The Chat completion inference API provides more comprehensive customization options through more fields and function calling support.
+If you use the `openai`, `hugging_face` or the `elastic` service, use the Chat completion inference API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference)
+
+```ts
+client.inference.chatCompletionUnified({ inference_id })
+```
+
+### Arguments [_arguments_inference.chat_completion_unified]
+
+#### Request (object) [_request_inference.chat_completion_unified]
+- **`inference_id` (string)**: The inference Id
+- **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })**
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete.
+
+## client.inference.completion [_inference.completion]
+Perform completion inference on the service.
+Get responses for completion tasks.
+This API works only with the completion task type.
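+
+As a minimal sketch, a completion request might look like this (the endpoint name `my-completion-endpoint` is a placeholder for an existing `completion` endpoint):
+
+```ts
+// Sketch: run a completion task against an existing inference endpoint.
+const response = await client.inference.completion({
+  inference_id: 'my-completion-endpoint',
+  input: 'Why does the sky appear blue?'
+})
+// The response contains one completion result per input string.
+console.log(response.completion)
+```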
+
+IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege).
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference)
+
+```ts
+client.inference.completion({ inference_id, input })
+```
+
+### Arguments [_arguments_inference.completion]
+
+#### Request (object) [_request_inference.completion]
+- **`inference_id` (string)**: The inference Id
+- **`input` (string \| string[])**: Inference input.
+Either a string or an array of strings.
+- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete.
+
+## client.inference.delete [_inference.delete]
+Delete an inference endpoint.
+This API requires the `manage_inference` cluster privilege (the built-in `inference_admin` role grants this privilege).
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete)
+
+```ts
+client.inference.delete({ inference_id })
+```
+
+### Arguments [_arguments_inference.delete]
+
+#### Request (object) [_request_inference.delete]
+- **`inference_id` (string)**: The inference identifier.
+- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type
+- **`dry_run` (Optional, boolean)**: When true, checks the semantic_text fields and inference processors that reference the endpoint and returns them in a list, but does not delete the endpoint.
+- **`force` (Optional, boolean)**: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields.
+
+## client.inference.get [_inference.get]
+Get an inference endpoint.
+This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege).
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get)
+
+```ts
+client.inference.get({ ... })
+```
+
+### Arguments [_arguments_inference.get]
+
+#### Request (object) [_request_inference.get]
+- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type
+- **`inference_id` (Optional, string)**: The inference Id
+
+## client.inference.inference [_inference.inference]
+Perform inference on the service.
+
+This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
+It returns a response with the results of the tasks.
+The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
+
+For details about using this API with a service, such as Amazon Bedrock, Anthropic, or Hugging Face, refer to the service-specific documentation.
+
+> info
+> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference)
+
+```ts
+client.inference.inference({ inference_id, input })
+```
+
+### Arguments [_arguments_inference.inference]
+
+#### Request (object) [_request_inference.inference]
+- **`inference_id` (string)**: The unique identifier for the inference endpoint.
+- **`input` (string \| string[])**: The text on which you want to perform the inference task.
+It can be a single string or an array.
+
+> info
+> Inference endpoints for the `completion` task type currently only support a single string as input.
+- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The type of inference task that the model performs.
+- **`query` (Optional, string)**: The query input, which is required only for the `rerank` task.
+It is not required for other tasks.
+- **`input_type` (Optional, string)**: Specifies the input data type for the text embedding model. The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` task type. Possible values include:
+* `SEARCH`
+* `INGEST`
+* `CLASSIFICATION`
+* `CLUSTERING`
+Not all services support all values. Unsupported values will trigger a validation exception.
+Accepted values depend on the configured inference service; refer to the relevant service-specific documentation for more info.
+
+> info
+> The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`.
+- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request.
+These settings are specific to the task type you specified and override the task settings specified when initializing the service.
+- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete.
+
+## client.inference.put [_inference.put]
+Create an inference endpoint.
+
+IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
+However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+The following integrations are available through the inference API.
You can find the available task types next to the integration name:
+* AI21 (`chat_completion`, `completion`)
+* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)
+* Amazon Bedrock (`completion`, `text_embedding`)
+* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)
+* Anthropic (`completion`)
+* Azure AI Studio (`completion`, `rerank`, `text_embedding`)
+* Azure OpenAI (`completion`, `text_embedding`)
+* Cohere (`completion`, `rerank`, `text_embedding`)
+* DeepSeek (`chat_completion`, `completion`)
+* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)
+* ELSER (`sparse_embedding`)
+* Google AI Studio (`completion`, `text_embedding`)
+* Google Vertex AI (`chat_completion`, `completion`, `rerank`, `text_embedding`)
+* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)
+* JinaAI (`rerank`, `text_embedding`)
+* Llama (`chat_completion`, `completion`, `text_embedding`)
+* Mistral (`chat_completion`, `completion`, `text_embedding`)
+* OpenAI (`chat_completion`, `completion`, `text_embedding`)
+* VoyageAI (`rerank`, `text_embedding`)
+* Watsonx inference integration (`text_embedding`)
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put)
+
+```ts
+client.inference.put({ inference_id })
+```
+
+### Arguments [_arguments_inference.put]
+
+#### Request (object) [_request_inference.put]
+- **`inference_id` (string)**: The inference Id
+- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The task type. Refer to the integration list in the API description for the available task types.
+- **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })**
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAi21 [_inference.put_ai21]
+Create an AI21 inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `ai21` service.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-ai21)
+
+```ts
+client.inference.putAi21({ task_type, ai21_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_ai21]
+
+#### Request (object) [_request_inference.put_ai21]
+- **`task_type` (Enum("completion" \| "chat_completion"))**: The type of the inference task that the model will perform.
+- **`ai21_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("ai21"))**: The type of service supported for the specified task type. In this case, `ai21`.
+- **`service_settings` ({ model_id, api_key, rate_limit })**: Settings used to install the inference model. These settings are specific to the `ai21` service.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAlibabacloud [_inference.put_alibabacloud]
+Create an AlibabaCloud AI Search inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud)
+
+```ts
+client.inference.putAlibabacloud({ task_type, alibabacloud_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_alibabacloud]
+
+#### Request (object) [_request_inference.put_alibabacloud]
+- **`task_type` (Enum("completion" \| "rerank" \| "sparse_embedding" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`alibabacloud_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("alibabacloud-ai-search"))**: The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`.
+- **`service_settings` ({ api_key, host, rate_limit, service_id, workspace })**: Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `sparse_embedding` or `text_embedding` task types.
+Not applicable to the `rerank` or `completion` task types.
+- **`task_settings` (Optional, { input_type, return_token })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAmazonbedrock [_inference.put_amazonbedrock]
+Create an Amazon Bedrock inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `amazonbedrock` service.
+
+> info
+> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock)
+
+```ts
+client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_amazonbedrock]
+
+#### Request (object) [_request_inference.put_amazonbedrock]
+- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`amazonbedrock_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("amazonbedrock"))**: The type of service supported for the specified task type. In this case, `amazonbedrock`.
+- **`service_settings` ({ access_key, model, provider, region, rate_limit, secret_key })**: Settings used to install the inference model. These settings are specific to the `amazonbedrock` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `text_embedding` task type.
+Not applicable to the `completion` task type.
+- **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. + +## client.inference.putAmazonsagemaker [_inference.put_amazonsagemaker] +Create an Amazon SageMaker inference endpoint. + +Create an inference endpoint to perform an inference task with the `amazon_sagemaker` service. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker) + +```ts +client.inference.putAmazonsagemaker({ task_type, amazonsagemaker_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_amazonsagemaker] + +#### Request (object) [_request_inference.put_amazonsagemaker] +- **`task_type` (Enum("text_embedding" \| "completion" \| "chat_completion" \| "sparse_embedding" \| "rerank"))**: The type of the inference task that the model will perform. +- **`amazonsagemaker_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("amazon_sagemaker"))**: The type of service supported for the specified task type. In this case, `amazon_sagemaker`. +- **`service_settings` ({ access_key, endpoint_name, api, region, secret_key, target_model, target_container_hostname, inference_component_name, batch_size, dimensions })**: Settings used to install the inference model. +These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `sparse_embedding` or `text_embedding` task types. +Not applicable to the `rerank`, `completion`, or `chat_completion` task types. +- **`task_settings` (Optional, { custom_attributes, enable_explanations, inference_id, session_id, target_variant })**: Settings to configure the inference task. +These settings are specific to the task type and `service_settings.api` you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. + +## client.inference.putAnthropic [_inference.put_anthropic] +Create an Anthropic inference endpoint. + +Create an inference endpoint to perform an inference task with the `anthropic` service. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic) + +```ts +client.inference.putAnthropic({ task_type, anthropic_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_anthropic] + +#### Request (object) [_request_inference.put_anthropic] +- **`task_type` (Enum("completion"))**: The task type. +The only valid task type for the model to perform is `completion`. +- **`anthropic_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("anthropic"))**: The type of service supported for the specified task type. In this case, `anthropic`. +- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `anthropic` service. +- **`task_settings` (Optional, { max_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. +These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. 
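+
+As a hedged sketch, creating an Anthropic completion endpoint might look like this (the endpoint name, API key, and model ID are placeholder values, not defaults):
+
+```ts
+// Sketch: register an Anthropic `completion` endpoint with the inference API.
+await client.inference.putAnthropic({
+  task_type: 'completion',
+  anthropic_inference_id: 'my-anthropic-endpoint', // placeholder name
+  service: 'anthropic',
+  service_settings: {
+    api_key: 'ANTHROPIC_API_KEY', // placeholder secret
+    model_id: 'claude-3-5-sonnet-20240620' // placeholder model ID
+  },
+  task_settings: { max_tokens: 1024 }
+})
+```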
+
+## client.inference.putAzureaistudio [_inference.put_azureaistudio]
+Create an Azure AI Studio inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `azureaistudio` service.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio)
+
+```ts
+client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_azureaistudio]
+
+#### Request (object) [_request_inference.put_azureaistudio]
+- **`task_type` (Enum("completion" \| "rerank" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`azureaistudio_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("azureaistudio"))**: The type of service supported for the specified task type. In this case, `azureaistudio`.
+- **`service_settings` ({ api_key, endpoint_type, target, provider, rate_limit })**: Settings used to install the inference model. These settings are specific to the `azureaistudio` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `text_embedding` task type.
+Not applicable to the `rerank` or `completion` task types.
+- **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user, return_documents, top_n })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putAzureopenai [_inference.put_azureopenai]
+Create an Azure OpenAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `azureopenai` service.
+
+The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:
+
+* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models)
+* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35)
+
+The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings).
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai)
+
+```ts
+client.inference.putAzureopenai({ task_type, azureopenai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_azureopenai]
+
+#### Request (object) [_request_inference.put_azureopenai]
+- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform.
+NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
+- **`azureopenai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("azureopenai"))**: The type of service supported for the specified task type. In this case, `azureopenai`.
+- **`service_settings` ({ api_key, api_version, deployment_id, entra_id, rate_limit, resource_name })**: Settings used to install the inference model. These settings are specific to the `azureopenai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `text_embedding` task type.
+Not applicable to the `completion` task type.
+- **`task_settings` (Optional, { user })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putCohere [_inference.put_cohere]
+Create a Cohere inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `cohere` service.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere)
+
+```ts
+client.inference.putCohere({ task_type, cohere_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_cohere]
+
+#### Request (object) [_request_inference.put_cohere]
+- **`task_type` (Enum("completion" \| "rerank" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`cohere_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("cohere"))**: The type of service supported for the specified task type. In this case, `cohere`.
+- **`service_settings` ({ api_key, embedding_type, model_id, rate_limit, similarity })**: Settings used to install the inference model.
+These settings are specific to the `cohere` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `text_embedding` task type.
+Not applicable to the `rerank` or `completion` task types.
+- **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putContextualai [_inference.put_contextualai]
+Create a Contextual AI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `contextualai` service.
+
+To review the available `rerank` models, refer to the Contextual AI documentation.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-contextualai)
+
+```ts
+client.inference.putContextualai({ task_type, contextualai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_contextualai]
+
+#### Request (object) [_request_inference.put_contextualai]
+- **`task_type` (Enum("rerank"))**: The type of the inference task that the model will perform.
+- **`contextualai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("contextualai"))**: The type of service supported for the specified task type. In this case, `contextualai`.
+- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `contextualai` service.
+- **`task_settings` (Optional, { instruction, return_documents, top_k })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putCustom [_inference.put_custom]
+Create a custom inference endpoint.
+
+The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations.
+The custom service gives you the ability to define the headers, URL, query parameters, request body, and secrets.
+The custom service supports template replacement, which enables you to define templates that are replaced with the values associated with their keys.
+Templates are portions of a string that start with `${` and end with `}`.
+The parameters `secret_parameters` and `task_settings` are checked for keys for template replacement. Template replacement is supported in the `request`, `headers`, `url`, and `query_parameters`.
+If the definition (key) is not found for a template, an error message is returned.
+For example, given an endpoint definition like the following:
+```
+PUT _inference/text_embedding/test-text-embedding
+{
+  "service": "custom",
+  "service_settings": {
+    "secret_parameters": {
+      "api_key": ""
+    },
+    "url": "...endpoints.huggingface.cloud/v1/embeddings",
+    "headers": {
+      "Authorization": "Bearer ${api_key}",
+      "Content-Type": "application/json"
+    },
+    "request": "{\"input\": ${input}}",
+    "response": {
+      "json_parser": {
+        "text_embeddings":"$.data[*].embedding[*]"
+      }
+    }
+  }
+}
+```
+To replace `${api_key}`, the `secret_parameters` and `task_settings` are checked for a key named `api_key`.
+
+> info
+> Templates should not be surrounded by quotes.
+
+Pre-defined templates:
+* `${input}` refers to the array of input strings that comes from the `input` field of the subsequent inference requests.
+* `${input_type}` refers to the input type translation values.
+* `${query}` refers to the query field used specifically for reranking tasks.
+* `${top_n}` refers to the `top_n` field available when performing rerank requests.
+* `${return_documents}` refers to the `return_documents` field available when performing rerank requests.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom)
+
+```ts
+client.inference.putCustom({ task_type, custom_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_custom]
+
+#### Request (object) [_request_inference.put_custom]
+- **`task_type` (Enum("text_embedding" \| "sparse_embedding" \| "rerank" \| "completion"))**: The type of the inference task that the model will perform.
+- **`custom_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("custom"))**: The type of service supported for the specified task type. In this case, `custom`.
+- **`service_settings` ({ batch_size, headers, input_type, query_parameters, request, response, secret_parameters, url })**: Settings used to install the inference model.
+These settings are specific to the `custom` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `sparse_embedding` or `text_embedding` task types.
+Not applicable to the `rerank` or `completion` task types.
+- **`task_settings` (Optional, { parameters })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+
+## client.inference.putDeepseek [_inference.put_deepseek]
+Create a DeepSeek inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `deepseek` service.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek)
+
+```ts
+client.inference.putDeepseek({ task_type, deepseek_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_deepseek]
+
+#### Request (object) [_request_inference.put_deepseek]
+- **`task_type` (Enum("completion" \| "chat_completion"))**: The type of the inference task that the model will perform.
+- **`deepseek_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("deepseek"))**: The type of service supported for the specified task type. In this case, `deepseek`.
+- **`service_settings` ({ api_key, model_id, url })**: Settings used to install the inference model.
+These settings are specific to the `deepseek` service.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putElasticsearch [_inference.put_elasticsearch]
+Create an Elasticsearch inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `elasticsearch` service.
+
+> info
+> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create endpoints using the API if you want to customize the settings.
+
+If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.
+
+> info
+> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the client, you can set the timeout parameter to a higher value.
+
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch)
+
+```ts
+client.inference.putElasticsearch({ task_type, elasticsearch_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_elasticsearch]
+
+#### Request (object) [_request_inference.put_elasticsearch]
+- **`task_type` (Enum("rerank" \| "sparse_embedding" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`elasticsearch_inference_id` (string)**: The unique identifier of the inference endpoint.
+It must not match the `model_id`.
+- **`service` (Enum("elasticsearch"))**: The type of service supported for the specified task type. In this case, `elasticsearch`.
+- **`service_settings` ({ adaptive_allocations, deployment_id, model_id, num_allocations, num_threads, long_document_strategy, max_chunks_per_doc })**: Settings used to install the inference model. These settings are specific to the `elasticsearch` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `sparse_embedding` and `text_embedding` task types.
+Not applicable to the `rerank` task type.
+- **`task_settings` (Optional, { return_documents })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putElser [_inference.put_elser]
+Create an ELSER inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `elser` service.
+You can also deploy ELSER by using the Elasticsearch inference integration.
+
+> info
+> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.
+
+The API request will automatically download and deploy the ELSER model if it isn't already downloaded.
+
+> info
+> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the client, you can set the timeout parameter to a higher value.
+
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elser)
+
+```ts
+client.inference.putElser({ task_type, elser_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_elser]
+
+#### Request (object) [_request_inference.put_elser]
+- **`task_type` (Enum("sparse_embedding"))**: The type of the inference task that the model will perform.
+- **`elser_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("elser"))**: The type of service supported for the specified task type. In this case, `elser`.
+- **`service_settings` ({ adaptive_allocations, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elser` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Note that for ELSER endpoints, the `max_chunk_size` may not exceed `300`.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putGoogleaistudio [_inference.put_googleaistudio]
+Create a Google AI Studio inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `googleaistudio` service.
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio) + +```ts +client.inference.putGoogleaistudio({ task_type, googleaistudio_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_googleaistudio] + +#### Request (object) [_request_inference.put_googleaistudio] +- **`task_type` (Enum("completion" \| "text_embedding"))**: The type of the inference task that the model will perform. +- **`googleaistudio_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("googleaistudio"))**: The type of service supported for the specified task type. In this case, `googleaistudio`. +- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` task type. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. + +## client.inference.putGooglevertexai [_inference.put_googlevertexai] +Create a Google Vertex AI inference endpoint. + +Create an inference endpoint to perform an inference task with the `googlevertexai` service. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai) + +```ts +client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_googlevertexai] + +#### Request (object) [_request_inference.put_googlevertexai] +- **`task_type` (Enum("rerank" \| "text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform. +- **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. +- **`service_settings` ({ provider, url, streaming_url, location, model_id, project_id, rate_limit, service_account_json, dimensions })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `rerank`, `completion`, or `chat_completion` task types. +- **`task_settings` (Optional, { auto_truncate, top_n, thinking_config, max_tokens })**: Settings to configure the inference task. +These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. + +## client.inference.putHuggingFace [_inference.put_hugging_face] +Create a Hugging Face inference endpoint. + +Create an inference endpoint to perform an inference task with the `hugging_face` service. +Supported tasks include: `text_embedding`, `completion`, and `chat_completion`. + +To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint. 
+Select a model that supports the task you intend to use.
+
+For Elastic's `text_embedding` task:
+The selected model must support the `Sentence Embeddings` task. On the new endpoint creation page, select the `Sentence Embeddings` task under the `Advanced Configuration` section.
+After the endpoint has initialized, copy the generated endpoint URL.
+Recommended models for the `text_embedding` task:
+
+* `all-MiniLM-L6-v2`
+* `all-MiniLM-L12-v2`
+* `all-mpnet-base-v2`
+* `e5-base-v2`
+* `e5-small-v2`
+* `multilingual-e5-base`
+* `multilingual-e5-small`
+
+For Elastic's `chat_completion` and `completion` tasks:
+The selected model must support the `Text Generation` task and expose the OpenAI API. Hugging Face supports both serverless and dedicated endpoints for `Text Generation`. When creating a dedicated endpoint, select the `Text Generation` task.
+After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and includes the `/v1/chat/completions` path in the URL. Then copy the full endpoint URL for use.
+Recommended models for the `chat_completion` and `completion` tasks:
+
+* `Mistral-7B-Instruct-v0.2`
+* `QwQ-32B`
+* `Phi-3-mini-128k-instruct`
+
+For Elastic's `rerank` task:
+The selected model must support the `sentence-ranking` task and expose the OpenAI API.
+Hugging Face currently supports only dedicated (not serverless) endpoints for `Rerank`.
+After the endpoint is initialized, copy the full endpoint URL for use.
+Tested models for the `rerank` task:
+
+* `bge-reranker-base`
+* `jina-reranker-v1-turbo-en-GGUF`
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face)
+
+```ts
+client.inference.putHuggingFace({ task_type, huggingface_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_hugging_face]
+
+#### Request (object) [_request_inference.put_hugging_face]
+- **`task_type` (Enum("chat_completion" \| "completion" \| "rerank" \| "text_embedding"))**: The type of the inference task that the model will perform.
+- **`huggingface_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("hugging_face"))**: The type of service supported for the specified task type. In this case, `hugging_face`.
+- **`service_settings` ({ api_key, rate_limit, url, model_id })**: Settings used to install the inference model. These settings are specific to the `hugging_face` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `text_embedding` task type.
+Not applicable to the `rerank`, `completion`, or `chat_completion` task types.
+- **`task_settings` (Optional, { return_documents, top_n })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putJinaai [_inference.put_jinaai]
+Create a JinaAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `jinaai` service.
+
+To review the available `rerank` and `text_embedding` models, refer to the JinaAI documentation.
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai) + +```ts +client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_jinaai] + +#### Request (object) [_request_inference.put_jinaai] +- **`task_type` (Enum("rerank" \| "text_embedding"))**: The type of the inference task that the model will perform. +- **`jinaai_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("jinaai"))**: The type of service supported for the specified task type. In this case, `jinaai`. +- **`service_settings` ({ api_key, model_id, rate_limit, similarity })**: Settings used to install the inference model. These settings are specific to the `jinaai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `rerank` task type. +- **`task_settings` (Optional, { return_documents, task, top_n })**: Settings to configure the inference task. +These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. + +## client.inference.putLlama [_inference.put_llama] +Create a Llama inference endpoint. + +Create an inference endpoint to perform an inference task with the `llama` service. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-llama) + +```ts +client.inference.putLlama({ task_type, llama_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_llama] + +#### Request (object) [_request_inference.put_llama] +- **`task_type` (Enum("text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform. +- **`llama_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("llama"))**: The type of service supported for the specified task type. In this case, `llama`. +- **`service_settings` ({ url, model_id, max_input_tokens, similarity, rate_limit })**: Settings used to install the inference model. These settings are specific to the `llama` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` or `chat_completion` task types. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. + +## client.inference.putMistral [_inference.put_mistral] +Create a Mistral inference endpoint. + +Create an inference endpoint to perform an inference task with the `mistral` service. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral) + +```ts +client.inference.putMistral({ task_type, mistral_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_mistral] + +#### Request (object) [_request_inference.put_mistral] +- **`task_type` (Enum("text_embedding" \| "completion" \| "chat_completion"))**: The type of the inference task that the model will perform. 
+- **`mistral_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("mistral"))**: The type of service supported for the specified task type. In this case, `mistral`.
+- **`service_settings` ({ api_key, max_input_tokens, model, rate_limit })**: Settings used to install the inference model. These settings are specific to the `mistral` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `text_embedding` task type.
+Not applicable to the `completion` or `chat_completion` task types.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putOpenai [_inference.put_openai]
+Create an OpenAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `openai` service or OpenAI-compatible APIs.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai)
+
+```ts
+client.inference.putOpenai({ task_type, openai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_openai]
+
+#### Request (object) [_request_inference.put_openai]
+- **`task_type` (Enum("chat_completion" \| "completion" \| "text_embedding"))**: The type of the inference task that the model will perform.
+NOTE: The `chat_completion` task type only supports streaming and only through the _stream API.
+- **`openai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("openai"))**: The type of service supported for the specified task type. In this case, `openai`.
+- **`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `openai` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object.
+Applies only to the `text_embedding` task type.
+Not applicable to the `completion` or `chat_completion` task types.
+- **`task_settings` (Optional, { user, headers })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created.
+
+## client.inference.putVoyageai [_inference.put_voyageai]
+Create a VoyageAI inference endpoint.
+
+Create an inference endpoint to perform an inference task with the `voyageai` service.
+
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-voyageai)
+
+```ts
+client.inference.putVoyageai({ task_type, voyageai_inference_id, service, service_settings })
+```
+
+### Arguments [_arguments_inference.put_voyageai]
+
+#### Request (object) [_request_inference.put_voyageai]
+- **`task_type` (Enum("text_embedding" \| "rerank"))**: The type of the inference task that the model will perform.
+- **`voyageai_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("voyageai"))**: The type of service supported for the specified task type. In this case, `voyageai`.
+- **`service_settings` ({ dimensions, model_id, rate_limit, embedding_type })**: Settings used to install the inference model. These settings are specific to the `voyageai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `rerank` task type. +- **`task_settings` (Optional, { input_type, return_documents, top_k, truncation })**: Settings to configure the inference task. +These settings are specific to the task type you specified. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. + +## client.inference.putWatsonx [_inference.put_watsonx] +Create a Watsonx inference endpoint. + +Create an inference endpoint to perform an inference task with the `watsonxai` service. +You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. +You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx) + +```ts +client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_settings }) +``` + +### Arguments [_arguments_inference.put_watsonx] + +#### Request (object) [_request_inference.put_watsonx] +- **`task_type` (Enum("text_embedding" \| "chat_completion" \| "completion"))**: The type of the inference task that the model will perform. +- **`watsonx_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("watsonxai"))**: The type of service supported for the specified task type. In this case, `watsonxai`. +- **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` or `chat_completion` task types. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. + +## client.inference.rerank [_inference.rerank] +Perform reranking inference on the service + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) + +```ts +client.inference.rerank({ inference_id, query, input }) +``` + +### Arguments [_arguments_inference.rerank] + +#### Request (object) [_request_inference.rerank] +- **`inference_id` (string)**: The unique identifier for the inference endpoint. +- **`query` (string)**: Query input. +- **`input` (string[])**: The documents to rank. +- **`return_documents` (Optional, boolean)**: Include the document text in the response. +- **`top_n` (Optional, number)**: Limit the response to the top N documents. +- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. +These settings are specific to the task type you specified and override the task settings specified when initializing the service. +- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete. 
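+
+For example, the following is a minimal sketch of a rerank request; the endpoint name `my-rerank-endpoint` and the documents are hypothetical:
+
+```ts
+const response = await client.inference.rerank({
+  inference_id: 'my-rerank-endpoint',
+  query: 'Which document is about Elasticsearch?',
+  input: [
+    'Kibana is a visualization tool.',
+    'Elasticsearch is a distributed search engine.'
+  ],
+  top_n: 1, // limit the response to the single best-scoring document
+  return_documents: true // include the document text in the response
+})
+console.log(response)
+```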
+
+## client.inference.sparseEmbedding [_inference.sparse_embedding]
+Perform sparse embedding inference on the service
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference)
+
+```ts
+client.inference.sparseEmbedding({ inference_id, input })
+```
+
+### Arguments [_arguments_inference.sparse_embedding]
+
+#### Request (object) [_request_inference.sparse_embedding]
+- **`inference_id` (string)**: The inference ID.
+- **`input` (string \| string[])**: Inference input.
+Either a string or an array of strings.
+- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete.
+
+## client.inference.streamCompletion [_inference.stream_completion]
+Perform streaming completion inference on the service
+Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
+This API works only with the completion task type.
+
+IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference)
+
+```ts
+client.inference.streamCompletion({ inference_id, input })
+```
+
+### Arguments [_arguments_inference.stream_completion]
+
+#### Request (object) [_request_inference.stream_completion]
+- **`inference_id` (string)**: The unique identifier for the inference endpoint.
+- **`input` (string \| string[])**: The text on which you want to perform the inference task.
+It can be a single string or an array.
+
+NOTE: Inference endpoints for the completion task type currently only support a single string as input.
+- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.
+- **`timeout` (Optional, string \| -1 \| 0)**: The amount of time to wait for the inference request to complete.
+
+## client.inference.textEmbedding [_inference.text_embedding]
+Perform text embedding inference on the service
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference)
+
+```ts
+client.inference.textEmbedding({ inference_id, input })
+```
+
+### Arguments [_arguments_inference.text_embedding]
+
+#### Request (object) [_request_inference.text_embedding]
+- **`inference_id` (string)**: The inference ID.
+- **`input` (string \| string[])**: Inference input.
+Either a string or an array of strings.
+- **`input_type` (Optional, string)**: The input data type for the text embedding model. Possible values include:
+* `SEARCH`
+* `INGEST`
+* `CLASSIFICATION`
+* `CLUSTERING`
+Not all services support all values. Unsupported values will trigger a validation exception.
+Accepted values depend on the configured inference service; refer to the relevant service-specific documentation for more info.
+
+> info
+> The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`.
+- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference request to complete.
+
+## client.inference.update [_inference.update]
+Update an inference endpoint.
+
+Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`.
+
+IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
+However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update)
+
+```ts
+client.inference.update({ inference_id })
+```
+
+### Arguments [_arguments_inference.update]
+
+#### Request (object) [_request_inference.update]
+- **`inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`task_type` (Optional, Enum("sparse_embedding" \| "text_embedding" \| "rerank" \| "completion" \| "chat_completion"))**: The type of inference task that the model performs.
+- **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })**
+
+## client.ingest.deleteGeoipDatabase [_ingest.delete_geoip_database]
+Delete GeoIP database configurations.
+
+Delete one or more IP geolocation database configurations.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database)
+
+```ts
+client.ingest.deleteGeoipDatabase({ id })
+```
+
+### Arguments [_arguments_ingest.delete_geoip_database]
+
+#### Request (object) [_request_ingest.delete_geoip_database]
+- **`id` (string \| string[])**: A list of GeoIP database configurations to delete.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ingest.deleteIpLocationDatabase [_ingest.delete_ip_location_database]
+Delete IP geolocation database configurations.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database)
+
+```ts
+client.ingest.deleteIpLocationDatabase({ id })
+```
+
+### Arguments [_arguments_ingest.delete_ip_location_database]
+
+#### Request (object) [_request_ingest.delete_ip_location_database]
+- **`id` (string \| string[])**: A list of IP location database configurations to delete.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+A value of `-1` indicates that the request should never time out.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+A value of `-1` indicates that the request should never time out.
+
+## client.ingest.deletePipeline [_ingest.delete_pipeline]
+Delete pipelines.
+Delete one or more ingest pipelines.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline)
+
+```ts
+client.ingest.deletePipeline({ id })
+```
+
+### Arguments [_arguments_ingest.delete_pipeline]
+
+#### Request (object) [_request_ingest.delete_pipeline]
+- **`id` (string)**: Pipeline ID or wildcard expression of pipeline IDs used to limit the request.
+To delete all ingest pipelines in a cluster, use a value of `*`.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ingest.geoIpStats [_ingest.geo_ip_stats]
+Get GeoIP statistics.
+Get download statistics for GeoIP2 databases that are used with the GeoIP processor.
+
+[Endpoint documentation](https://www.elastic.co/docs/reference/enrich-processor/geoip-processor)
+
+```ts
+client.ingest.geoIpStats()
+```
+
+
+## client.ingest.getGeoipDatabase [_ingest.get_geoip_database]
+Get GeoIP database configurations.
+
+Get information about one or more IP geolocation database configurations.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database)
+
+```ts
+client.ingest.getGeoipDatabase({ ... })
+```
+
+### Arguments [_arguments_ingest.get_geoip_database]
+
+#### Request (object) [_request_ingest.get_geoip_database]
+- **`id` (Optional, string \| string[])**: A list of database configuration IDs to retrieve.
+Wildcard (`*`) expressions are supported.
+To get all database configurations, omit this parameter or use `*`.
+
+## client.ingest.getIpLocationDatabase [_ingest.get_ip_location_database]
+Get IP geolocation database configurations.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database)
+
+```ts
+client.ingest.getIpLocationDatabase({ ... })
+```
+
+### Arguments [_arguments_ingest.get_ip_location_database]
+
+#### Request (object) [_request_ingest.get_ip_location_database]
+- **`id` (Optional, string \| string[])**: List of database configuration IDs to retrieve.
+Wildcard (`*`) expressions are supported.
+To get all database configurations, omit this parameter or use `*`.
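+
+As a minimal sketch (the configuration ID `my-ip-db` is hypothetical), you can list every configuration or fetch a single one by ID:
+
+```ts
+// List every IP location database configuration.
+const all = await client.ingest.getIpLocationDatabase()
+
+// Fetch a single configuration by ID.
+const one = await client.ingest.getIpLocationDatabase({ id: 'my-ip-db' })
+```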
+ +## client.ingest.getPipeline [_ingest.get_pipeline] +Get pipelines. + +Get information about one or more ingest pipelines. +This API returns a local reference of the pipeline. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline) + +```ts +client.ingest.getPipeline({ ... }) +``` + +### Arguments [_arguments_ingest.get_pipeline] + +#### Request (object) [_request_ingest.get_pipeline] +- **`id` (Optional, string)**: List of pipeline IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all ingest pipelines, omit this parameter or use `*`. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`summary` (Optional, boolean)**: Return pipelines without their definitions (default: false) + +## client.ingest.processorGrok [_ingest.processor_grok] +Run a grok processor. +Extract structured fields out of a single text field within a document. +You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. +A grok pattern is like a regular expression that supports aliased expressions that can be reused. + +[Endpoint documentation](https://www.elastic.co/docs/reference/enrich-processor/grok-processor) + +```ts +client.ingest.processorGrok() +``` + + +## client.ingest.putGeoipDatabase [_ingest.put_geoip_database] +Create or update a GeoIP database configuration. + +Refer to the create or update IP geolocation database configuration API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database) + +```ts +client.ingest.putGeoipDatabase({ id, name, maxmind }) +``` + +### Arguments [_arguments_ingest.put_geoip_database] + +#### Request (object) [_request_ingest.put_geoip_database] +- **`id` (string)**: ID of the database configuration to create or update. +- **`name` (string)**: The provider-assigned name of the IP geolocation database to download. +- **`maxmind` ({ account_id })**: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. +At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.ingest.putIpLocationDatabase [_ingest.put_ip_location_database] +Create or update an IP geolocation database configuration. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database) + +```ts +client.ingest.putIpLocationDatabase({ id }) +``` + +### Arguments [_arguments_ingest.put_ip_location_database] + +#### Request (object) [_request_ingest.put_ip_location_database] +- **`id` (string)**: The database configuration identifier. +- **`configuration` (Optional, { name, maxmind, ipinfo })** +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. 
+If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. +A value of `-1` indicates that the request should never time out. + +## client.ingest.putPipeline [_ingest.put_pipeline] +Create or update a pipeline. +Changes made using this API take effect immediately. + +[Endpoint documentation](https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines) + +```ts +client.ingest.putPipeline({ id }) +``` + +### Arguments [_arguments_ingest.put_pipeline] + +#### Request (object) [_request_ingest.put_pipeline] +- **`id` (string)**: ID of the ingest pipeline to create or update. +- **`_meta` (Optional, Record)**: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. +- **`description` (Optional, string)**: Description of the ingest pipeline. +- **`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. +- **`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +- **`version` (Optional, number)**: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. +- **`deprecated` (Optional, boolean)**: Marks this ingest pipeline as deprecated. +When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. +- **`field_access_pattern` (Optional, Enum("classic" \| "flexible"))**: Controls how processors in this pipeline should read and write data on a document's source. +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`if_version` (Optional, number)**: Required version for optimistic concurrency control for pipeline updates.
+
+## client.ingest.simulate [_ingest.simulate]
+Simulate a pipeline.
+
+Run an ingest pipeline against a set of provided documents.
+You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate)
+
+```ts
+client.ingest.simulate({ docs })
+```
+
+### Arguments [_arguments_ingest.simulate]
+
+#### Request (object) [_request_ingest.simulate]
+- **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline.
+- **`id` (Optional, string)**: The pipeline to test.
+If you don't specify a `pipeline` in the request body, this parameter is required.
+- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta, created_date, created_date_millis, modified_date, modified_date_millis, field_access_pattern })**: The pipeline to test.
+If you don't specify the `pipeline` request path parameter, this parameter is required.
+If you specify both this and the request path parameter, the API only uses the request path parameter.
+- **`verbose` (Optional, boolean)**: If `true`, the response includes output data for each processor in the executed pipeline.
+
+## client.license.delete [_license.delete]
+Delete the license.
+
+When the license expires, your subscription level reverts to Basic.
+
+If the operator privileges feature is enabled, only operator users can use this API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete)
+
+```ts
+client.license.delete({ ... })
+```
+
+### Arguments [_arguments_license.delete]
+
+#### Request (object) [_request_license.delete]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.license.get [_license.get]
+Get license information.
+
+Get information about your Elastic license including its type, its status, when it was issued, and when it expires.
+
+> info
+> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.
+> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get)
+
+```ts
+client.license.get({ ... })
+```
+
+### Arguments [_arguments_license.get]
+
+#### Request (object) [_request_license.get]
+- **`accept_enterprise` (Optional, boolean)**: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility.
+This parameter is deprecated and will always be set to true in 8.x.
+- **`local` (Optional, boolean)**: Specifies whether to retrieve local information.
+From 9.2 onwards the default value is `true`, which means the information is retrieved from the responding node. +In earlier versions the default is `false`, which means the information is retrieved from the elected master node. + +## client.license.getBasicStatus [_license.get_basic_status] +Get the basic license status. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status) + +```ts +client.license.getBasicStatus() +``` + + +## client.license.getTrialStatus [_license.get_trial_status] +Get the trial status. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status) + +```ts +client.license.getTrialStatus() +``` + + +## client.license.post [_license.post] +Update the license. + +You can update your license at runtime without shutting down your nodes. +License updates take effect immediately. +If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. +You must then re-submit the API request with the acknowledge parameter set to true. + +NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. +If the operator privileges feature is enabled, only operator users can use this API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post) + +```ts +client.license.post({ ... }) +``` + +### Arguments [_arguments_license.post] + +#### Request (object) [_request_license.post] +- **`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })** +- **`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])**: A sequence of one or more JSON documents containing the license information. +- **`acknowledge` (Optional, boolean)**: Specifies whether you acknowledge the license changes. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.license.postStartBasic [_license.post_start_basic] +Start a basic license. + +Start an indefinite basic license, which gives access to all the basic features. + +NOTE: In order to start a basic license, you must not currently have a basic license. + +If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. +You must then re-submit the API request with the `acknowledge` parameter set to `true`. + +To check the status of your basic license, use the get basic license API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic) + +```ts +client.license.postStartBasic({ ... 
})
+```
+
+### Arguments [_arguments_license.post_start_basic]
+
+#### Request (object) [_request_license.post_start_basic]
+- **`acknowledge` (Optional, boolean)**: Whether the user has acknowledged the acknowledge messages (default: false).
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.license.postStartTrial [_license.post_start_trial]
+Start a trial.
+Start a 30-day trial, which gives access to all subscription features.
+
+NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version.
+For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.
+
+To check the status of your trial, use the get trial status API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial)
+
+```ts
+client.license.postStartTrial({ ... })
+```
+
+### Arguments [_arguments_license.post_start_trial]
+
+#### Request (object) [_request_license.post_start_trial]
+- **`acknowledge` (Optional, boolean)**: Whether the user has acknowledged the acknowledge messages (default: false).
+- **`type` (Optional, string)**: The type of trial license to generate (default: "trial").
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.logstash.deletePipeline [_logstash.delete_pipeline]
+Delete a Logstash pipeline.
+Delete a pipeline that is used for Logstash Central Management.
+If the request succeeds, you receive an empty response with an appropriate status code.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline)
+
+```ts
+client.logstash.deletePipeline({ id })
+```
+
+### Arguments [_arguments_logstash.delete_pipeline]
+
+#### Request (object) [_request_logstash.delete_pipeline]
+- **`id` (string)**: An identifier for the pipeline.
+
+## client.logstash.getPipeline [_logstash.get_pipeline]
+Get Logstash pipelines.
+Get pipelines that are used for Logstash Central Management.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline)
+
+```ts
+client.logstash.getPipeline({ ... })
+```
+
+### Arguments [_arguments_logstash.get_pipeline]
+
+#### Request (object) [_request_logstash.get_pipeline]
+- **`id` (Optional, string \| string[])**: A list of pipeline identifiers.
+
+## client.logstash.putPipeline [_logstash.put_pipeline]
+Create or update a Logstash pipeline.
+
+Create a pipeline that is used for Logstash Central Management.
+If the specified pipeline exists, it is replaced.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline)
+
+```ts
+client.logstash.putPipeline({ id })
+```
+
+### Arguments [_arguments_logstash.put_pipeline]
+
+#### Request (object) [_request_logstash.put_pipeline]
+- **`id` (string)**: An identifier for the pipeline.
+Pipeline IDs must begin with a letter or underscore and contain only letters, underscores, dashes, and numbers.
+- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta, created_date, created_date_millis, modified_date, modified_date_millis, field_access_pattern })**
+
+## client.migration.deprecations [_migration.deprecations]
+Get deprecation information.
+Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant.
+We strongly recommend you use the Upgrade Assistant.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations)
+
+```ts
+client.migration.deprecations({ ... })
+```
+
+### Arguments [_arguments_migration.deprecations]
+
+#### Request (object) [_request_migration.deprecations]
+- **`index` (Optional, string)**: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.
+
+## client.migration.getFeatureUpgradeStatus [_migration.get_feature_upgrade_status]
+Get feature migration information.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+Check which features need to be migrated and the status of any migrations that are in progress.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant.
+We strongly recommend you use the Upgrade Assistant.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status)
+
+```ts
+client.migration.getFeatureUpgradeStatus()
+```
+
+
+## client.migration.postFeatureUpgrade [_migration.post_feature_upgrade]
+Start the feature migration.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+This API starts the automatic migration process.
+
+Some functionality might be temporarily unavailable during the migration process.
+
+TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status)
+
+```ts
+client.migration.postFeatureUpgrade()
+```
+
+
+## client.ml.clearTrainedModelDeploymentCache [_ml.clear_trained_model_deployment_cache]
+Clear trained model deployment cache.
+
+Cache will be cleared on all nodes where the trained model is assigned.
+A trained model deployment may have an inference cache enabled.
+As requests are handled by each allocated node, their responses may be cached on that individual node.
+Calling this API clears the caches without restarting the deployment.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache)
+
+```ts
+client.ml.clearTrainedModelDeploymentCache({ model_id })
+```
+
+### Arguments [_arguments_ml.clear_trained_model_deployment_cache]
+
+#### Request (object) [_request_ml.clear_trained_model_deployment_cache]
+- **`model_id` (string)**: The unique identifier of the trained model.
+
+## client.ml.closeJob [_ml.close_job]
+Close anomaly detection jobs.
+
+A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
+
+## client.migration.deprecations [_migration.deprecations]
+Get deprecation information.
+Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant.
+We strongly recommend you use the Upgrade Assistant.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations)
+
+```ts
+client.migration.deprecations({ ... })
+```
+
+### Arguments [_arguments_migration.deprecations]
+
+#### Request (object) [_request_migration.deprecations]
+- **`index` (Optional, string)**: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.
+
+## client.migration.getFeatureUpgradeStatus [_migration.get_feature_upgrade_status]
+Get feature migration information.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+Check which features need to be migrated and the status of any migrations that are in progress.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant.
+We strongly recommend you use the Upgrade Assistant.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status)
+
+```ts
+client.migration.getFeatureUpgradeStatus()
+```
+
+
+## client.migration.postFeatureUpgrade [_migration.post_feature_upgrade]
+Start the feature migration.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+This API starts the automatic migration process.
+
+Some functionality might be temporarily unavailable during the migration process.
+
+TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-post-feature-upgrade)
+
+```ts
+client.migration.postFeatureUpgrade()
+```
+
+
+## client.ml.clearTrainedModelDeploymentCache [_ml.clear_trained_model_deployment_cache]
+Clear trained model deployment cache.
+
+Cache will be cleared on all nodes where the trained model is assigned.
+A trained model deployment may have an inference cache enabled.
+As requests are handled by each allocated node, their responses may be cached on that individual node.
+Calling this API clears the caches without restarting the deployment.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache)
+
+```ts
+client.ml.clearTrainedModelDeploymentCache({ model_id })
+```
+
+### Arguments [_arguments_ml.clear_trained_model_deployment_cache]
+
+#### Request (object) [_request_ml.clear_trained_model_deployment_cache]
+- **`model_id` (string)**: The unique identifier of the trained model.
+
+## client.ml.closeJob [_ml.close_job]
+Close anomaly detection jobs.
+
+A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
+When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its metadata. Therefore it is a best practice to close jobs that are no longer required to process data.
+If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling the stop datafeed API with the same timeout and force parameters as the close job request.
+When a datafeed that has a specified end date stops, it automatically closes its associated job.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job)
+
+```ts
+client.ml.closeJob({ job_id })
+```
+
+### Arguments [_arguments_ml.close_job]
+
+#### Request (object) [_request_ml.close_job]
+- **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.
+- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter.
+- **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter.
+- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter.
+
+## client.ml.deleteCalendar [_ml.delete_calendar]
+Delete a calendar.
+
+Remove all scheduled events from a calendar, then delete it.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar)
+
+```ts
+client.ml.deleteCalendar({ calendar_id })
+```
+
+### Arguments [_arguments_ml.delete_calendar]
+
+#### Request (object) [_request_ml.delete_calendar]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+
+## client.ml.deleteCalendarEvent [_ml.delete_calendar_event]
+Delete events from a calendar.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event)
+
+```ts
+client.ml.deleteCalendarEvent({ calendar_id, event_id })
+```
+
+### Arguments [_arguments_ml.delete_calendar_event]
+
+#### Request (object) [_request_ml.delete_calendar_event]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`event_id` (string)**: Identifier for the scheduled event.
+You can obtain this identifier by using the get calendar events API.
+
+## client.ml.deleteCalendarJob [_ml.delete_calendar_job]
+Delete anomaly jobs from a calendar.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job)
+
+```ts
+client.ml.deleteCalendarJob({ calendar_id, job_id })
+```
+
+### Arguments [_arguments_ml.delete_calendar_job]
+
+#### Request (object) [_request_ml.delete_calendar_job]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`job_id` (string \| string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.
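+
+A minimal sketch, with hypothetical calendar and job names, that detaches two jobs from a calendar in one call:
+
+```ts
+// Remove two jobs from the "planned-outages" calendar
+await client.ml.deleteCalendarJob({
+  calendar_id: 'planned-outages',
+  job_id: 'job-one,job-two'
+})
+```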
+
+## client.ml.deleteDataFrameAnalytics [_ml.delete_data_frame_analytics]
+Delete a data frame analytics job.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics)
+
+```ts
+client.ml.deleteDataFrameAnalytics({ id })
+```
+
+### Arguments [_arguments_ml.delete_data_frame_analytics]
+
+#### Request (object) [_request_ml.delete_data_frame_analytics]
+- **`id` (string)**: Identifier for the data frame analytics job.
+- **`force` (Optional, boolean)**: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job.
+- **`timeout` (Optional, string \| -1 \| 0)**: The time to wait for the job to be deleted.
+
+## client.ml.deleteDatafeed [_ml.delete_datafeed]
+Delete a datafeed.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed)
+
+```ts
+client.ml.deleteDatafeed({ datafeed_id })
+```
+
+### Arguments [_arguments_ml.delete_datafeed]
+
+#### Request (object) [_request_ml.delete_datafeed]
+- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
+- **`force` (Optional, boolean)**: Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed.
+
+## client.ml.deleteExpiredData [_ml.delete_expired_data]
+Delete expired ML data.
+
+Delete all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted.
+You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data)
+
+```ts
+client.ml.deleteExpiredData({ ... })
+```
+
+### Arguments [_arguments_ml.delete_expired_data]
+
+#### Request (object) [_request_ml.delete_expired_data]
+- **`job_id` (Optional, string)**: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression.
+- **`requests_per_second` (Optional, float)**: The desired requests per second for the deletion processes. The default behavior is no throttling.
+- **`timeout` (Optional, string \| -1 \| 0)**: How long the underlying delete processes can run before they are canceled.
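+
+A minimal sketch of a throttled cleanup across all jobs; the rate and timeout are hypothetical values:
+
+```ts
+// Delete expired results for every anomaly detection job,
+// throttled to roughly 100 delete requests per second
+await client.ml.deleteExpiredData({
+  job_id: '_all',
+  requests_per_second: 100.0,
+  timeout: '1h'
+})
+```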
+
+## client.ml.deleteFilter [_ml.delete_filter]
+Delete a filter.
+
+If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter)
+
+```ts
+client.ml.deleteFilter({ filter_id })
+```
+
+### Arguments [_arguments_ml.delete_filter]
+
+#### Request (object) [_request_ml.delete_filter]
+- **`filter_id` (string)**: A string that uniquely identifies a filter.
+
+## client.ml.deleteForecast [_ml.delete_forecast]
+Delete forecasts from a job.
+
+By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast)
+
+```ts
+client.ml.deleteForecast({ job_id })
+```
+
+### Arguments [_arguments_ml.delete_forecast]
+
+#### Request (object) [_request_ml.delete_forecast]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`forecast_id` (Optional, string)**: A list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*`, the API deletes all forecasts from the job.
+- **`allow_no_forecasts` (Optional, boolean)**: Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error.
+
+## client.ml.deleteJob [_ml.delete_job]
+Delete an anomaly detection job.
+
+All job configuration, model state and results are deleted.
+It is not currently possible to delete multiple jobs using wildcards or a comma-separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job)
+
+```ts
+client.ml.deleteJob({ job_id })
+```
+
+### Arguments [_arguments_ml.delete_job]
+
+#### Request (object) [_request_ml.delete_job]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`force` (Optional, boolean)**: Use to forcefully delete an opened job; this method is quicker than closing and deleting the job.
+- **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset.
+- **`wait_for_completion` (Optional, boolean)**: Specifies whether the request should return immediately or wait until the job deletion completes.
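+
+A minimal sketch, assuming a hypothetical job name, that force-deletes an open job and returns immediately rather than waiting for the deletion to finish:
+
+```ts
+// Force-delete the job and let the deletion continue in the background
+await client.ml.deleteJob({
+  job_id: 'site-traffic-anomalies',
+  force: true,
+  wait_for_completion: false
+})
+```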
+
+## client.ml.deleteModelSnapshot [_ml.delete_model_snapshot]
+Delete a model snapshot.
+
+You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot)
+
+```ts
+client.ml.deleteModelSnapshot({ job_id, snapshot_id })
+```
+
+### Arguments [_arguments_ml.delete_model_snapshot]
+
+#### Request (object) [_request_ml.delete_model_snapshot]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`snapshot_id` (string)**: Identifier for the model snapshot.
+
+## client.ml.deleteTrainedModel [_ml.delete_trained_model]
+Delete an unreferenced trained model.
+
+The request deletes a trained inference model that is not referenced by an ingest pipeline.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model)
+
+```ts
+client.ml.deleteTrainedModel({ model_id })
+```
+
+### Arguments [_arguments_ml.delete_trained_model]
+
+#### Request (object) [_request_ml.delete_trained_model]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`force` (Optional, boolean)**: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ml.deleteTrainedModelAlias [_ml.delete_trained_model_alias]
+Delete a trained model alias.
+
+This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias)
+
+```ts
+client.ml.deleteTrainedModelAlias({ model_alias, model_id })
+```
+
+### Arguments [_arguments_ml.delete_trained_model_alias]
+
+#### Request (object) [_request_ml.delete_trained_model_alias]
+- **`model_alias` (string)**: The model alias to delete.
+- **`model_id` (string)**: The trained model ID to which the model alias refers.
+
+## client.ml.estimateModelMemory [_ml.estimate_model_memory]
+Estimate job model memory usage.
+
+Make an estimation of the memory usage for an anomaly detection job model. The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory)
+
+```ts
+client.ml.estimateModelMemory({ ... })
+```
+
+### Arguments [_arguments_ml.estimate_model_memory]
+
+#### Request (object) [_request_ml.estimate_model_memory]
+- **`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: The analysis configuration details for the job. Refer to the create anomaly detection jobs API for the list of properties that you can specify in the `analysis_config` component of the body of this API.
+- **`max_bucket_cardinality` (Optional, Record)**: Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. Providing values for fields that are not listed as `influencers` has no effect on the estimation.
+- **`overall_cardinality` (Optional, Record)**: Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`.
+
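+
+A minimal sketch, with hypothetical detector and field names, that pairs an `analysis_config` with the cardinality estimates described above:
+
+```ts
+// Estimate model memory for a job that partitions on "department"
+// and marks "user" as an influencer
+const estimate = await client.ml.estimateModelMemory({
+  analysis_config: {
+    bucket_span: '15m',
+    detectors: [{ function: 'sum', field_name: 'bytes', partition_field_name: 'department' }],
+    influencers: ['user']
+  },
+  overall_cardinality: { department: 50 },
+  max_bucket_cardinality: { user: 500 }
+})
+console.log(estimate.model_memory_estimate)
+```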
+
+## client.ml.evaluateDataFrame [_ml.evaluate_data_frame]
+Evaluate data frame analytics.
+
+The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame)
+
+```ts
+client.ml.evaluateDataFrame({ evaluation, index })
+```
+
+### Arguments [_arguments_ml.evaluate_data_frame]
+
+#### Request (object) [_request_ml.evaluate_data_frame]
+- **`evaluation` ({ classification, outlier_detection, regression })**: Defines the type of evaluation you want to perform.
+- **`index` (string)**: Defines the `index` in which the evaluation will be performed.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query clause that retrieves a subset of data from the source index.
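+
+A minimal sketch of a classification evaluation, assuming a hypothetical destination index where the ground truth lives in `label` and the analytics result in `ml.label_prediction`:
+
+```ts
+// Compare predicted labels against the ground truth field
+const result = await client.ml.evaluateDataFrame({
+  index: 'dest-index',
+  evaluation: {
+    classification: {
+      actual_field: 'label',
+      predicted_field: 'ml.label_prediction',
+      metrics: { multiclass_confusion_matrix: {} }
+    }
+  }
+})
+```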
+
+## client.ml.explainDataFrameAnalytics [_ml.explain_data_frame_analytics]
+Explain data frame analytics config.
+
+This API provides explanations for a data frame analytics config that either already exists or has not been created yet. The following explanations are provided:
+* which fields are included or not in the analysis and why,
+* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for the `model_memory_limit` setting later on.
+If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics)
+
+```ts
+client.ml.explainDataFrameAnalytics({ ... })
+```
+
+### Arguments [_arguments_ml.explain_data_frame_analytics]
+
+#### Request (object) [_request_ml.explain_data_frame_analytics]
+- **`id` (Optional, string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
+- **`source` (Optional, { index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified.
+- **`dest` (Optional, { index, results_field })**: The destination configuration, consisting of index and optionally results_field (ml by default).
+- **`analysis` (Optional, { classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression.
+- **`description` (Optional, string)**: A description of the job.
+- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting.
+- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself.
+- **`analyzed_fields` (Optional, { includes, excludes })**: Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis.
+- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node.
+
+## client.ml.flushJob [_ml.flush_job]
+Force buffered data to be processed.
+The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job)
+
+```ts
+client.ml.flushJob({ job_id })
+```
+
+### Arguments [_arguments_ml.flush_job]
+
+#### Request (object) [_request_ml.flush_job]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`advance_time` (Optional, string \| Unit)**: Refer to the description for the `advance_time` query parameter.
+- **`calc_interim` (Optional, boolean)**: Refer to the description for the `calc_interim` query parameter.
+- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
+- **`skip_time` (Optional, string \| Unit)**: Refer to the description for the `skip_time` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
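+
+A minimal sketch, assuming a hypothetical job ID, that flushes the buffer and asks for interim results:
+
+```ts
+// Calculate interim results for the most recent (partial) bucket
+await client.ml.flushJob({
+  job_id: 'site-traffic-anomalies',
+  calc_interim: true
+})
+```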
+
+## client.ml.forecast [_ml.forecast]
+Predict future behavior of a time series.
+
+Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast)
+
+```ts
+client.ml.forecast({ job_id })
+```
+
+### Arguments [_arguments_ml.forecast]
+
+#### Request (object) [_request_ml.forecast]
+- **`job_id` (string)**: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs.
+- **`duration` (Optional, string \| -1 \| 0)**: Refer to the description for the `duration` query parameter.
+- **`expires_in` (Optional, string \| -1 \| 0)**: Refer to the description for the `expires_in` query parameter.
+- **`max_model_memory` (Optional, string)**: Refer to the description for the `max_model_memory` query parameter.
+
+## client.ml.getBuckets [_ml.get_buckets]
+Get anomaly detection job results for buckets.
+The API presents a chronological view of the records, grouped by bucket.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets)
+
+```ts
+client.ml.getBuckets({ job_id })
+```
+
+### Arguments [_arguments_ml.get_buckets]
+
+#### Request (object) [_request_ml.get_buckets]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`timestamp` (Optional, string \| Unit)**: The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets.
+- **`anomaly_score` (Optional, number)**: Refer to the description for the `anomaly_score` query parameter.
+- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
+- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
+- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
+- **`expand` (Optional, boolean)**: Refer to the description for the `expand` query parameter.
+- **`page` (Optional, { from, size })**
+- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
+- **`from` (Optional, number)**: Skips the specified number of buckets.
+- **`size` (Optional, number)**: Specifies the maximum number of buckets to obtain.
+
+## client.ml.getCalendarEvents [_ml.get_calendar_events]
+Get info about events in calendars.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events)
+
+```ts
+client.ml.getCalendarEvents({ calendar_id })
+```
+
+### Arguments [_arguments_ml.get_calendar_events]
+
+#### Request (object) [_request_ml.get_calendar_events]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
+- **`end` (Optional, string \| Unit)**: Specifies to get events with timestamps earlier than this time.
+- **`from` (Optional, number)**: Skips the specified number of events.
+- **`job_id` (Optional, string)**: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`.
+- **`size` (Optional, number)**: Specifies the maximum number of events to obtain.
+- **`start` (Optional, string \| Unit)**: Specifies to get events with timestamps after this time.
+
+## client.ml.getCalendars [_ml.get_calendars]
+Get calendar configuration info.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars)
+
+```ts
+client.ml.getCalendars({ ... })
+```
+
+### Arguments [_arguments_ml.get_calendars]
+
+#### Request (object) [_request_ml.get_calendars]
+- **`calendar_id` (Optional, string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
+- **`page` (Optional, { from, size })**: This object is supported only when you omit the calendar identifier. +- **`from` (Optional, number)**: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. +- **`size` (Optional, number)**: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. + +## client.ml.getCategories [_ml.get_categories] +Get anomaly detection job results for categories. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories) + +```ts +client.ml.getCategories({ job_id }) +``` + +### Arguments [_arguments_ml.get_categories] + +#### Request (object) [_request_ml.get_categories] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`category_id` (Optional, string)**: Identifier for the category, which is unique in the job. If you specify +neither the category ID nor the partition_field_value, the API returns +information about all categories. If you specify only the +partition_field_value, it returns information about all categories for +the specified partition. +- **`page` (Optional, { from, size })**: Configures pagination. +This parameter has the `from` and `size` properties. +- **`from` (Optional, number)**: Skips the specified number of categories. +- **`partition_field_value` (Optional, string)**: Only return categories for the specified partition. +- **`size` (Optional, number)**: Specifies the maximum number of categories to obtain. + +## client.ml.getDataFrameAnalytics [_ml.get_data_frame_analytics] +Get data frame analytics job configuration info. +You can get information for multiple data frame analytics jobs in a single +API request by using a list of data frame analytics jobs or a +wildcard expression. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics) + +```ts +client.ml.getDataFrameAnalytics({ ... }) +``` + +### Arguments [_arguments_ml.get_data_frame_analytics] + +#### Request (object) [_request_ml.get_data_frame_analytics] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. +- **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + +## client.ml.getDataFrameAnalyticsStats [_ml.get_data_frame_analytics_stats] +Get data frame analytics job stats. 
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats)
+
+```ts
+client.ml.getDataFrameAnalyticsStats({ ... })
+```
+
+### Arguments [_arguments_ml.get_data_frame_analytics_stats]
+
+#### Request (object) [_request_ml.get_data_frame_analytics_stats]
+- **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no data frame analytics jobs that match.
+2. Contains the `_all` string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches.
+- **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs.
+- **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain.
+- **`verbose` (Optional, boolean)**: Defines whether the stats response should be verbose.
+
+## client.ml.getDatafeedStats [_ml.get_datafeed_stats]
+Get datafeed stats.
+You can get statistics for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`.
+This API returns a maximum of 10,000 datafeeds.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats)
+
+```ts
+client.ml.getDatafeedStats({ ... })
+```
+
+### Arguments [_arguments_ml.get_datafeed_stats]
+
+#### Request (object) [_request_ml.get_datafeed_stats]
+- **`datafeed_id` (Optional, string \| string[])**: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no datafeeds that match.
+2. Contains the `_all` string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches.
+
+## client.ml.getDatafeeds [_ml.get_datafeeds]
+Get datafeeds configuration info.
+You can get information for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`.
+This API returns a maximum of 10,000 datafeeds.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds)
+
+```ts
+client.ml.getDatafeeds({ ...
}) +``` + +### Arguments [_arguments_ml.get_datafeeds] + +#### Request (object) [_request_ml.get_datafeeds] +- **`datafeed_id` (Optional, string \| string[])**: Identifier for the datafeed. It can be a datafeed identifier or a +wildcard expression. If you do not specify one of these options, the API +returns information about all datafeeds. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no datafeeds that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array +when there are no matches and the subset of results when there are +partial matches. If this parameter is `false`, the request returns a +`404` status code when there are no matches or only partial matches. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + +## client.ml.getFilters [_ml.get_filters] +Get filters. +You can get a single filter or all filters. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters) + +```ts +client.ml.getFilters({ ... }) +``` + +### Arguments [_arguments_ml.get_filters] + +#### Request (object) [_request_ml.get_filters] +- **`filter_id` (Optional, string \| string[])**: A string that uniquely identifies a filter. +- **`from` (Optional, number)**: Skips the specified number of filters. +- **`size` (Optional, number)**: Specifies the maximum number of filters to obtain. + +## client.ml.getInfluencers [_ml.get_influencers] +Get anomaly detection job results for influencers. +Influencers are the entities that have contributed to, or are to blame for, +the anomalies. Influencer results are available only if an +`influencer_field_name` is specified in the job configuration. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers) + +```ts +client.ml.getInfluencers({ job_id }) +``` + +### Arguments [_arguments_ml.get_influencers] + +#### Request (object) [_request_ml.get_influencers] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`page` (Optional, { from, size })**: Configures pagination. +This parameter has the `from` and `size` properties. +- **`desc` (Optional, boolean)**: If true, the results are sorted in descending order. +- **`end` (Optional, string \| Unit)**: Returns influencers with timestamps earlier than this time. +The default value means it is unset and results are not limited to +specific timestamps. +- **`exclude_interim` (Optional, boolean)**: If true, the output excludes interim results. By default, interim results +are included. +- **`influencer_score` (Optional, number)**: Returns influencers with anomaly scores greater than or equal to this +value. +- **`from` (Optional, number)**: Skips the specified number of influencers. +- **`size` (Optional, number)**: Specifies the maximum number of influencers to obtain. +- **`sort` (Optional, string)**: Specifies the sort field for the requested influencers. By default, the +influencers are sorted by the `influencer_score` value. +- **`start` (Optional, string \| Unit)**: Returns influencers with timestamps after this time. 
The default value means it is unset and results are not limited to specific timestamps.
+
+## client.ml.getJobStats [_ml.get_job_stats]
+Get anomaly detection job stats.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats)
+
+```ts
+client.ml.getJobStats({ ... })
+```
+
+### Arguments [_arguments_ml.get_job_stats]
+
+#### Request (object) [_request_ml.get_job_stats]
+- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, a list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no jobs that match.
+2. Contains the _all string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches.
+
+## client.ml.getJobs [_ml.get_jobs]
+Get anomaly detection jobs configuration info.
+You can get information for multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs)
+
+```ts
+client.ml.getJobs({ ... })
+```
+
+### Arguments [_arguments_ml.get_jobs]
+
+#### Request (object) [_request_ml.get_jobs]
+- **`job_id` (Optional, string \| string[])**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no jobs that match.
+2. Contains the _all string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches.
+- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.
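+
+A minimal sketch that fetches every job in a hypothetical "production" group, tolerating the case where none match:
+
+```ts
+// List all jobs in the "production" group without failing on zero matches
+const { jobs } = await client.ml.getJobs({
+  job_id: 'production',
+  allow_no_match: true
+})
+```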
+
+## client.ml.getMemoryStats [_ml.get_memory_stats]
+Get machine learning memory usage info.
+Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats)
+
+```ts
+client.ml.getMemoryStats({ ... })
+```
+
+### Arguments [_arguments_ml.get_memory_stats]
+
+#### Request (object) [_request_ml.get_memory_stats]
+- **`node_id` (Optional, string)**: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true`.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.ml.getModelSnapshotUpgradeStats [_ml.get_model_snapshot_upgrade_stats]
+Get anomaly detection job model snapshot upgrade usage info.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats)
+
+```ts
+client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id })
+```
+
+### Arguments [_arguments_ml.get_model_snapshot_upgrade_stats]
+
+#### Request (object) [_request_ml.get_model_snapshot_upgrade_stats]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+ - Contains wildcard expressions and there are no jobs that match.
+ - Contains the _all string or no identifiers and there are no matches.
+ - Contains wildcard expressions and there are only partial matches.
+
+The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches.
+
+## client.ml.getModelSnapshots [_ml.get_model_snapshots]
+Get model snapshots info.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots)
+
+```ts
+client.ml.getModelSnapshots({ job_id })
+```
+
+### Arguments [_arguments_ml.get_model_snapshots]
+
+#### Request (object) [_request_ml.get_model_snapshots]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`snapshot_id` (Optional, string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
+- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
+- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
+- **`page` (Optional, { from, size })**
+- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
+- **`from` (Optional, number)**: Skips the specified number of snapshots.
+- **`size` (Optional, number)**: Specifies the maximum number of snapshots to obtain.
+
+## client.ml.getOverallBuckets [_ml.get_overall_buckets]
+Get overall bucket results.
+
+Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs.
+
+The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span.
First, the maximum +`anomaly_score` per anomaly detection job in the overall bucket is +calculated. Then the `top_n` of those scores are averaged to result in +the `overall_score`. This means that you can fine-tune the +`overall_score` so that it is more or less sensitive to the number of +jobs that detect an anomaly at the same time. For example, if you set +`top_n` to `1`, the `overall_score` is the maximum bucket score in the +overall bucket. Alternatively, if you set `top_n` to the number of jobs, +the `overall_score` is high only when all jobs detect anomalies in that +overall bucket. If you set the `bucket_span` parameter (to a value +greater than its default), the `overall_score` is the maximum +`overall_score` of the overall buckets that have a span equal to the +jobs' largest bucket span. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets) + +```ts +client.ml.getOverallBuckets({ job_id }) +``` + +### Arguments [_arguments_ml.get_overall_buckets] + +#### Request (object) [_request_ml.get_overall_buckets] +- **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a +group name, a list of jobs or groups, or a wildcard +expression. + +You can summarize the bucket results for all anomaly detection jobs by +using `_all` or by specifying `*` as the ``. +- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. +- **`bucket_span` (Optional, string \| -1 \| 0)**: Refer to the description for the `bucket_span` query parameter. +- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter. +- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. +- **`overall_score` (Optional, number)**: Refer to the description for the `overall_score` query parameter. +- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter. +- **`top_n` (Optional, number)**: Refer to the description for the `top_n` query parameter. + +## client.ml.getRecords [_ml.get_records] +Get anomaly records for an anomaly detection job. +Records contain the detailed analytical results. They describe the anomalous +activity that has been identified in the input data based on the detector +configuration. +There can be many anomaly records depending on the characteristics and size +of the input data. In practice, there are often too many to be able to +manually process them. The machine learning features therefore perform a +sophisticated aggregation of the anomaly records into buckets. +The number of record results depends on the number of anomalies found in each +bucket, which relates to the number of time series being modeled and the +number of detectors. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records) + +```ts +client.ml.getRecords({ job_id }) +``` + +### Arguments [_arguments_ml.get_records] + +#### Request (object) [_request_ml.get_records] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. +- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter. +- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. 
+
+## client.ml.getRecords [_ml.get_records]
+Get anomaly records for an anomaly detection job.
+Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration.
+There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets.
+The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records)
+
+```ts
+client.ml.getRecords({ job_id })
+```
+
+### Arguments [_arguments_ml.get_records]
+
+#### Request (object) [_request_ml.get_records]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter.
+- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter.
+- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
+- **`page` (Optional, { from, size })**
+- **`record_score` (Optional, number)**: Refer to the description for the `record_score` query parameter.
+- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter.
+- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter.
+- **`from` (Optional, number)**: Skips the specified number of records.
+- **`size` (Optional, number)**: Specifies the maximum number of records to obtain.
+
+## client.ml.getTrainedModels [_ml.get_trained_models]
+Get trained model configuration info.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models)
+
+```ts
+client.ml.getTrainedModels({ ... })
+```
+
+### Arguments [_arguments_ml.get_trained_models]
+
+#### Request (object) [_request_ml.get_trained_models]
+- **`model_id` (Optional, string \| string[])**: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+- Contains wildcard expressions and there are no models that match.
+- Contains the _all string or no identifiers and there are no matches.
+- Contains wildcard expressions and there are only partial matches.
+
+If true, it returns an empty array when there are no matches and the subset of results when there are partial matches.
+- **`decompress_definition` (Optional, boolean)**: Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false).
+- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster.
+- **`from` (Optional, number)**: Skips the specified number of models.
+- **`include` (Optional, Enum("definition" \| "feature_importance_baseline" \| "hyperparameters" \| "total_feature_importance" \| "definition_status"))**: A comma-delimited string of optional fields to include in the response body.
+- **`size` (Optional, number)**: Specifies the maximum number of models to obtain.
+- **`tags` (Optional, string \| string[])**: A comma-delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned.
+
+## client.ml.getTrainedModelsStats [_ml.get_trained_models_stats]
+Get trained models usage info.
+You can get usage information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats)
+
+```ts
+client.ml.getTrainedModelsStats({ ... })
+```
+
+### Arguments [_arguments_ml.get_trained_models_stats]
+
+#### Request (object) [_request_ml.get_trained_models_stats]
+- **`model_id` (Optional, string \| string[])**: The unique identifier of the trained model or a model alias. It can be a list or a wildcard expression.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+- Contains wildcard expressions and there are no models that match.
+- Contains the _all string or no identifiers and there are no matches.
+- Contains wildcard expressions and there are only partial matches.
+
+If true, it returns an empty array when there are no matches and the subset of results when there are partial matches.
+- **`from` (Optional, number)**: Skips the specified number of models.
+- **`size` (Optional, number)**: Specifies the maximum number of models to obtain.
+
+## client.ml.inferTrainedModel [_ml.infer_trained_model]
+Evaluate a trained model.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model)
+
+```ts
+client.ml.inferTrainedModel({ model_id, docs })
+```
+
+### Arguments [_arguments_ml.infer_trained_model]
+
+#### Request (object) [_request_ml.infer_trained_model]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`docs` (Record[])**: An array of objects to pass to the model for inference. The objects should contain fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed.
+- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The inference configuration updates to apply on the API call
+- **`timeout` (Optional, string \| -1 \| 0)**: Controls the amount of time to wait for inference results.
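+
+A minimal sketch, assuming a hypothetical NLP model ID whose input uses the conventional `text_field`:
+
+```ts
+// Run inference against a deployed NLP model
+const response = await client.ml.inferTrainedModel({
+  model_id: 'my-text-classifier',
+  docs: [{ text_field: 'The new release fixed the crash on startup.' }],
+  timeout: '30s'
+})
+```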
+
+## client.ml.info [_ml.info]
+Get machine learning information.
+Get defaults and limits used by machine learning.
+This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info)
+
+```ts
+client.ml.info()
+```
+
+
+## client.ml.openJob [_ml.open_job]
+Open anomaly detection jobs.
+
+An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle.
+When you open a new job, it starts with an empty model.
+When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job)
+
+```ts
+client.ml.openJob({ job_id })
+```
+
+### Arguments [_arguments_ml.open_job]
+
+#### Request (object) [_request_ml.open_job]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter.
+
+## client.ml.postCalendarEvents [_ml.post_calendar_events]
+Add scheduled events to the calendar.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events)
+
+```ts
+client.ml.postCalendarEvents({ calendar_id, events })
+```
+
+### Arguments [_arguments_ml.post_calendar_events]
+
+#### Request (object) [_request_ml.post_calendar_events]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])**: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.
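+
+A minimal sketch that schedules a hypothetical maintenance window using ISO 8601 timestamps:
+
+```ts
+// Tell the "planned-outages" calendar about an upcoming maintenance window
+await client.ml.postCalendarEvents({
+  calendar_id: 'planned-outages',
+  events: [{
+    description: 'Quarterly maintenance window',
+    start_time: '2025-06-01T00:00:00Z',
+    end_time: '2025-06-01T04:00:00Z'
+  }]
+})
+```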
+
+## client.ml.postData [_ml.post_data]
+Send data to an anomaly detection job for analysis.
+
+IMPORTANT: For each job, data can be accepted from only a single connection at a time.
+It is not currently possible to post data to multiple jobs using wildcards or a list.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data)
+
+```ts
+client.ml.postData({ job_id })
+```
+
+### Arguments [_arguments_ml.post_data]
+
+#### Request (object) [_request_ml.post_data]
+- **`job_id` (string)**: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data.
+- **`data` (Optional, TData[])**
+- **`reset_end` (Optional, string \| Unit)**: Specifies the end of the bucket resetting range.
+- **`reset_start` (Optional, string \| Unit)**: Specifies the start of the bucket resetting range.
+
+## client.ml.previewDataFrameAnalytics [_ml.preview_data_frame_analytics]
+Preview features used by data frame analytics.
+Preview the extracted features used by a data frame analytics config.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics)
+
+```ts
+client.ml.previewDataFrameAnalytics({ ... })
+```
+
+### Arguments [_arguments_ml.preview_data_frame_analytics]
+
+#### Request (object) [_request_ml.preview_data_frame_analytics]
+- **`id` (Optional, string)**: Identifier for the data frame analytics job.
+- **`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })**: A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API.
+
+## client.ml.previewDatafeed [_ml.preview_datafeed]
+Preview a datafeed.
+This API returns the first "page" of search results from a datafeed.
+You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine.
+IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed)
+
+```ts
+client.ml.previewDatafeed({ ... })
+```
+
+### Arguments [_arguments_ml.preview_datafeed]
+
+#### Request (object) [_request_ml.preview_datafeed]
+- **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body.
+- **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: The datafeed definition to preview.
+- **`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })**: The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object.
+- **`start` (Optional, string \| Unit)**: The start time from where the datafeed preview should begin.
+- **`end` (Optional, string \| Unit)**: The end time when the datafeed preview should stop.
+
+## client.ml.putCalendar [_ml.put_calendar]
+Create a calendar.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar)
+
+```ts
+client.ml.putCalendar({ calendar_id })
+```
+
+### Arguments [_arguments_ml.put_calendar]
+
+#### Request (object) [_request_ml.put_calendar]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`job_ids` (Optional, string[])**: An array of anomaly detection job identifiers.
+- **`description` (Optional, string)**: A description of the calendar.
+
+## client.ml.putCalendarJob [_ml.put_calendar_job]
+Add an anomaly detection job to a calendar.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job)
+
+```ts
+client.ml.putCalendarJob({ calendar_id, job_id })
+```
+
+### Arguments [_arguments_ml.put_calendar_job]
+
+#### Request (object) [_request_ml.put_calendar_job]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`job_id` (string \| string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups.
It must start and end with alphanumeric characters.
+- **`analysis` ({ classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to
+perform one of the following types of analysis: classification, outlier
+detection, or regression.
+- **`dest` ({ index, results_field })**: The destination configuration.
+- **`source` ({ index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data.
+- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine
+learning node capacity for it to be immediately assigned to a node. If
+set to `false` and a machine learning node with capacity to run the job
+cannot be immediately found, the API returns an error. If set to `true`,
+the API does not return an error; the job waits in the `starting` state
+until sufficient machine learning node capacity is available. This
+behavior is also affected by the cluster-wide
+`xpack.ml.max_lazy_ml_nodes` setting.
+- **`analyzed_fields` (Optional, { includes, excludes })**: Specifies `includes` and/or `excludes` patterns to select which fields
+will be included in the analysis. The patterns specified in `excludes`
+are applied last; therefore, `excludes` takes precedence. In other words,
+if the same field is specified in both `includes` and `excludes`, then
+the field will not be included in the analysis. If `analyzed_fields` is
+not set, only the relevant fields will be included. For example, all the
+numeric fields for outlier detection.
+The supported fields vary for each type of analysis. Outlier detection
+requires numeric or `boolean` data to analyze. The algorithms don’t
+support missing values; therefore, fields that have data types other than
+numeric or boolean are ignored. Documents where included fields contain
+missing values, null values, or an array are also ignored. Therefore, the
+`dest` index may contain documents that don’t have an outlier score.
+Regression supports fields that are numeric, `boolean`, `text`,
+`keyword`, and `ip` data types. It is also tolerant of missing values.
+Fields that are supported are included in the analysis; other fields are
+ignored. Documents where included fields contain an array with two or
+more values are also ignored. Documents in the `dest` index that don’t
+contain a results field are not included in the regression analysis.
+Classification supports fields that are numeric, `boolean`, `text`,
+`keyword`, and `ip` data types. It is also tolerant of missing values.
+Fields that are supported are included in the analysis; other fields are
+ignored. Documents where included fields contain an array with two or
+more values are also ignored. Documents in the `dest` index that don’t
+contain a results field are not included in the classification analysis.
+Classification analysis can be improved by mapping ordinal variable
+values to a single number. For example, in the case of age ranges, you can
+model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on.
+- **`description` (Optional, string)**: A description of the job.
+- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more
+threads may decrease the time necessary to complete the analysis at the
+cost of using more CPU. Note that the process may use additional threads
+for operational functionality other than the analysis itself.
+- **`_meta` (Optional, Record)**
+- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for
+analytical processing. If your `elasticsearch.yml` file contains an
+`xpack.ml.max_model_memory_limit` setting, an error occurs when you try
+to create data frame analytics jobs that have `model_memory_limit` values
+greater than that setting.
+- **`headers` (Optional, Record)**
+- **`version` (Optional, string)**
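+
+For example, a minimal sketch of creating an outlier detection job (the job ID and index names here are illustrative, not part of the API):
+
+```ts
+// Hypothetical example: analyze the `houses` index for outliers
+// and write the results to `houses-outliers`.
+await client.ml.putDataFrameAnalytics({
+  id: 'houses-outliers',
+  analysis: { outlier_detection: {} },
+  source: { index: 'houses' },
+  dest: { index: 'houses-outliers' }
+})
+```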
+
+## client.ml.putDatafeed [_ml.put_datafeed]
+Create a datafeed.
+Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.
+You can associate only one datafeed with each anomaly detection job.
+The datafeed contains a query that runs at a defined interval (`frequency`).
+If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval.
+By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`.
+
+When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had
+at the time of creation and runs the query using those same roles. If you provide secondary authorization headers,
+those credentials are used instead.
+You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed
+directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed)
+
+```ts
+client.ml.putDatafeed({ datafeed_id })
+```
+
+### Arguments [_arguments_ml.put_datafeed]
+
+#### Request (object) [_request_ml.put_datafeed]
+- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed.
+This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores.
+It must start and end with alphanumeric characters.
+- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches.
+Support for aggregations is limited and should be used only with low cardinality data.
+- **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might be required to search over long time periods, for several months or years.
+This search is split into time chunks in order to ensure the load on Elasticsearch is managed.
+Chunking configuration controls how the size of these time chunks is calculated;
+it is an advanced configuration option.
+- **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window.
+The datafeed can optionally search over indices that have already been read in an effort to determine whether
+any data has subsequently been added to the index. If missing data is found, it is a good indication that the
+`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time.
+This check runs only on real-time datafeeds.
+- **`frequency` (Optional, string \| -1 \| 0)**: The interval at which scheduled queries are made while the datafeed runs in real time.
+The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible
+fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last
+(partial) bucket are written then eventually overwritten by the full bucket results.
If the datafeed uses
+aggregations, this value must be divisible by the interval of the date histogram aggregation.
+- **`indices` (Optional, string \| string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master
+nodes and the machine learning nodes must have the `remote_cluster_client` role.
+- **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search.
+- **`job_id` (Optional, string)**: Identifier for the anomaly detection job.
+- **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically
+stops and closes the associated job after this many real-time searches return no documents. In other words,
+it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no
+end time that sees no data remains started until it is explicitly stopped. By default, it is not set.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this
+object is passed verbatim to Elasticsearch.
+- **`query_delay` (Optional, string \| -1 \| 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
+not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
+value is randomly selected between `60s` and `120s`. This randomness improves the query performance
+when there are multiple jobs running on the same node.
+- **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search.
+- **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and return script fields to the datafeed.
+The detector configuration objects in a job can contain functions that use these script fields.
+- **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
+The maximum value is the value of `index.max_result_window`, which is 10,000 by default.
+- **`headers` (Optional, Record)**
+- **`allow_no_indices` (Optional, boolean)**: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all`
+string or when no indices are specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
+whether wildcard expressions match hidden data streams. Supports a list of values.
+- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If true, unavailable indices (missing or closed) are ignored.
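+
+For example, a minimal sketch that attaches a datafeed to an existing anomaly detection job (the job ID and index name are illustrative):
+
+```ts
+// Hypothetical example: query the `server-metrics` index once per minute
+// on behalf of the anomaly detection job `metrics-job`.
+await client.ml.putDatafeed({
+  datafeed_id: 'datafeed-metrics-job',
+  job_id: 'metrics-job',
+  indices: ['server-metrics'],
+  query: { match_all: {} },
+  frequency: '60s'
+})
+```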
+
+## client.ml.putFilter [_ml.put_filter]
+Create a filter.
+A filter contains a list of strings. It can be used by one or more anomaly detection jobs.
+Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter)
+
+```ts
+client.ml.putFilter({ filter_id })
+```
+
+### Arguments [_arguments_ml.put_filter]
+
+#### Request (object) [_request_ml.put_filter]
+- **`filter_id` (string)**: A string that uniquely identifies a filter.
+- **`description` (Optional, string)**: A description of the filter.
+- **`items` (Optional, string[])**: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item.
+Up to 10000 items are allowed in each filter.
+
+## client.ml.putJob [_ml.put_job]
+Create an anomaly detection job.
+
+If you include a `datafeed_config`, you must have read index privileges on the source index.
+If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job)
+
+```ts
+client.ml.putJob({ job_id, analysis_config, data_description })
+```
+
+### Arguments [_arguments_ml.put_job]
+
+#### Request (object) [_request_ml.put_job]
+- **`job_id` (string)**: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
+- **`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational.
+- **`data_description` ({ format, time_field, time_format, field_delimiter })**: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained.
+- **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available.
+- **`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })**: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes.
+- **`background_persist_interval` (Optional, string \| -1 \| 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low.
+- **`custom_settings` (Optional, User-defined value)**: Advanced configuration option. Contains custom metadata about the job.
+- **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`.
+- **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
+- **`description` (Optional, string)**: A description of the job.
+- **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many.
+- **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot, it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced.
+- **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted.
+- **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans.
+- **`results_index_name` (Optional, string)**: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`.
+- **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever.
+- **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the
+`_all` string or when no indices are specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
+whether wildcard expressions match hidden data streams. Supports a list of values.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored.
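+
+For example, a minimal sketch of a count-based job (the job ID, bucket span, and time field are illustrative):
+
+```ts
+// Hypothetical example: detect anomalous event rates in 15-minute buckets.
+await client.ml.putJob({
+  job_id: 'event-rate-job',
+  analysis_config: {
+    bucket_span: '15m',
+    detectors: [{ function: 'count' }]
+  },
+  data_description: { time_field: 'timestamp' }
+})
+```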
+
+## client.ml.putTrainedModel [_ml.put_trained_model]
+Create a trained model.
+Enables you to supply a trained model that is not created by data frame analytics.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model)
+
+```ts
+client.ml.putTrainedModel({ model_id })
+```
+
+### Arguments [_arguments_ml.put_trained_model]
+
+#### Request (object) [_request_ml.put_trained_model]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`compressed_definition` (Optional, string)**: The compressed (GZipped and Base64 encoded) inference definition of the
+model. If compressed_definition is specified, then definition cannot be
+specified.
+- **`definition` (Optional, { preprocessors, trained_model })**: The inference definition for the model. If definition is specified, then
+compressed_definition cannot be specified.
+- **`description` (Optional, string)**: A human-readable description of the inference trained model.
+- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, learning_to_rank, ner, pass_through, text_embedding, text_expansion, question_answering })**: The default configuration for inference. This can be either a regression
+or classification configuration. It must match the underlying
+definition.trained_model's target_type. For pre-packaged models such as
+ELSER the config is not required.
+- **`input` (Optional, { field_names })**: The input field names for the model definition.
+- **`metadata` (Optional, User-defined value)**: An object map that contains metadata about the model.
+- **`model_type` (Optional, Enum("tree_ensemble" \| "lang_ident" \| "pytorch"))**: The model type.
+- **`model_size_bytes` (Optional, number)**: The estimated memory usage in bytes to keep the trained model in memory.
+This property is supported only if defer_definition_decompression is true
+or the model definition is not supplied.
+- **`platform_architecture` (Optional, string)**: The platform architecture (if applicable) of the trained model. If the model
+only works on one platform, because it is heavily optimized for a particular
+processor architecture and OS combination, then this field specifies which.
+The format of the string must match the platform identifiers used by Elasticsearch,
+so it must be one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`,
+or `windows-x86_64`. For portable models (those that work independently of processor
+architecture or OS features), leave this field unset.
+- **`tags` (Optional, string[])**: An array of tags to organize the model.
+- **`prefix_strings` (Optional, { ingest, search })**: Optional prefix strings applied at inference.
+- **`defer_definition_decompression` (Optional, boolean)**: If set to `true` and a `compressed_definition` is provided,
+the request defers definition decompression and skips relevant
+validations.
+- **`wait_for_completion` (Optional, boolean)**: Whether to wait for all child operations (e.g. model download)
+to complete.
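+
+For example, a minimal sketch of supplying an externally trained regression model (the model ID, field names, and definition placeholder are illustrative):
+
+```ts
+// Hypothetical example: register a tree ensemble trained outside Elasticsearch.
+await client.ml.putTrainedModel({
+  model_id: 'my-regression-model',
+  model_type: 'tree_ensemble',
+  inference_config: { regression: {} },
+  input: { field_names: ['feature_1', 'feature_2'] },
+  compressed_definition: '<gzipped, base64-encoded definition>'
+})
+```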
+
+## client.ml.putTrainedModelAlias [_ml.put_trained_model_alias]
+Create or update a trained model alias.
+A trained model alias is a logical name used to reference a single trained
+model.
+You can use aliases instead of trained model identifiers to make it easier to
+reference your models. For example, you can use aliases in inference
+aggregations and processors.
+An alias must be unique and refer to only a single trained model. However,
+you can have multiple aliases for each trained model.
+If you use this API to update an alias such that it references a different
+trained model ID and the model uses a different type of data frame analytics,
+an error occurs. For example, this situation occurs if you have a trained
+model for regression analysis and a trained model for classification
+analysis; you cannot reassign an alias from one type of trained model to
+another.
+If you use this API to update an alias and there are very few input fields in
+common between the old and new trained models for the model alias, the API
+returns a warning.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias)
+
+```ts
+client.ml.putTrainedModelAlias({ model_alias, model_id })
+```
+
+### Arguments [_arguments_ml.put_trained_model_alias]
+
+#### Request (object) [_request_ml.put_trained_model_alias]
+- **`model_alias` (string)**: The alias to create or update. This value cannot end in numbers.
+- **`model_id` (string)**: The identifier for the trained model that the alias refers to.
+- **`reassign` (Optional, boolean)**: Specifies whether the alias gets reassigned to the specified trained
+model if it is already assigned to a different model. If the alias is
+already assigned and this parameter is false, the API returns an error.
+
+## client.ml.putTrainedModelDefinitionPart [_ml.put_trained_model_definition_part]
+Create part of a trained model definition.
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part) + +```ts +client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts }) +``` + +### Arguments [_arguments_ml.put_trained_model_definition_part] + +#### Request (object) [_request_ml.put_trained_model_definition_part] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`part` (number)**: The definition part number. When the definition is loaded for inference the definition parts are streamed in the +order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. +- **`definition` (string)**: The definition part for the model. Must be a base64 encoded string. +- **`total_definition_length` (number)**: The total uncompressed definition length in bytes. Not base64 encoded. +- **`total_parts` (number)**: The total number of parts that will be uploaded. Must be greater than 0. + +## client.ml.putTrainedModelVocabulary [_ml.put_trained_model_vocabulary] +Create a trained model vocabulary. +This API is supported only for natural language processing (NLP) models. +The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary) + +```ts +client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) +``` + +### Arguments [_arguments_ml.put_trained_model_vocabulary] + +#### Request (object) [_request_ml.put_trained_model_vocabulary] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`vocabulary` (string[])**: The model vocabulary, which must not be empty. +- **`merges` (Optional, string[])**: The optional model merges if required by the tokenizer. +- **`scores` (Optional, number[])**: The optional vocabulary value scores if required by the tokenizer. + +## client.ml.resetJob [_ml.reset_job] +Reset an anomaly detection job. +All model state and results are deleted. The job is ready to start over as if +it had just been created. +It is not currently possible to reset multiple jobs using wildcards or a +comma separated list. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job) + +```ts +client.ml.resetJob({ job_id }) +``` + +### Arguments [_arguments_ml.reset_job] + +#### Request (object) [_request_ml.reset_job] +- **`job_id` (string)**: The ID of the job to reset. +- **`wait_for_completion` (Optional, boolean)**: Should this request wait until the operation has completed before +returning. +- **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the +user should be deleted along with any auto-generated annotations when the job is +reset. + +## client.ml.revertModelSnapshot [_ml.revert_model_snapshot] +Revert to a snapshot. +The machine learning features react quickly to anomalous input, learning new +behaviors in data. Highly anomalous input increases the variance in the +models whilst the system learns whether this is a new step-change in behavior +or a one-off event. In the case where this anomalous input is known to be a +one-off, then it might be appropriate to reset the model state to a time +before this event. For example, you might consider reverting to a saved +snapshot after Black Friday or a critical system failure. 
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot)
+
+```ts
+client.ml.revertModelSnapshot({ job_id, snapshot_id })
+```
+
+### Arguments [_arguments_ml.revert_model_snapshot]
+
+#### Request (object) [_request_ml.revert_model_snapshot]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`snapshot_id` (string)**: You can specify `empty` as the snapshot identifier. Reverting to the empty
+snapshot means the anomaly detection job starts learning a new model from
+scratch when it is started.
+- **`delete_intervening_results` (Optional, boolean)**: Refer to the description for the `delete_intervening_results` query parameter.
+
+## client.ml.setUpgradeMode [_ml.set_upgrade_mode]
+Set upgrade_mode for ML indices.
+Sets a cluster-wide upgrade_mode setting that prepares machine learning
+indices for an upgrade.
+When upgrading your cluster, in some circumstances you must restart your
+nodes and reindex your machine learning indices. In those circumstances,
+there must be no machine learning jobs running. You can close the machine
+learning jobs, do the upgrade, then open all the jobs again. Alternatively,
+you can use this API to temporarily halt tasks associated with the jobs and
+datafeeds and prevent new jobs from opening. You can also use this API
+during upgrades that do not require you to reindex your machine learning
+indices, though stopping jobs is not a requirement in that case.
+You can see the current value for the upgrade_mode setting by using the get
+machine learning info API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode)
+
+```ts
+client.ml.setUpgradeMode({ ... })
+```
+
+### Arguments [_arguments_ml.set_upgrade_mode]
+
+#### Request (object) [_request_ml.set_upgrade_mode]
+- **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode`, which temporarily halts all job
+and datafeed tasks and prohibits new job and datafeed tasks from
+starting.
+- **`timeout` (Optional, string \| -1 \| 0)**: The time to wait for the request to be completed.
+
+## client.ml.startDataFrameAnalytics [_ml.start_data_frame_analytics]
+Start a data frame analytics job.
+A data frame analytics job can be started and stopped multiple times
+throughout its lifecycle.
+If the destination index does not exist, it is created automatically the
+first time you start the data frame analytics job. The
+`index.number_of_shards` and `index.number_of_replicas` settings for the
+destination index are copied from the source index. If there are multiple
+source indices, the destination index copies the highest setting values. The
+mappings for the destination index are also copied from the source indices.
+If there are any mapping conflicts, the job fails to start.
+If the destination index exists, it is used as is. You can therefore set up
+the destination index in advance with custom settings and mappings.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics)
+
+```ts
+client.ml.startDataFrameAnalytics({ id })
+```
+
+### Arguments [_arguments_ml.start_data_frame_analytics]
+
+#### Request (object) [_request_ml.start_data_frame_analytics]
+- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain
+lowercase alphanumeric characters (a-z and 0-9), hyphens, and
+underscores. It must start and end with alphanumeric characters.
+- **`timeout` (Optional, string \| -1 \| 0)**: Controls the amount of time to wait until the data frame analytics job +starts. + +## client.ml.startDatafeed [_ml.start_datafeed] +Start datafeeds. + +A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. + +Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. + +If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. +If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. + +When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or +update it had at the time of creation or update and runs the query using those same roles. If you provided secondary +authorization headers when you created or updated the datafeed, those credentials are used instead. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed) + +```ts +client.ml.startDatafeed({ datafeed_id }) +``` + +### Arguments [_arguments_ml.start_datafeed] + +#### Request (object) [_request_ml.start_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. +- **`end` (Optional, string \| Unit)**: Refer to the description for the `end` query parameter. +- **`start` (Optional, string \| Unit)**: Refer to the description for the `start` query parameter. +- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter. + +## client.ml.startTrainedModelDeployment [_ml.start_trained_model_deployment] +Start a trained model deployment. +It allocates the model to every machine learning node. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment) + +```ts +client.ml.startTrainedModelDeployment({ model_id }) +``` + +### Arguments [_arguments_ml.start_trained_model_deployment] + +#### Request (object) [_request_ml.start_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. +- **`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })**: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. +- **`cache_size` (Optional, number \| string)**: The inference cache size (in memory outside the JVM heap) per node for the model. +The default value is the same size as the `model_size_bytes`. To disable the cache, +`0b` can be provided. +- **`deployment_id` (Optional, string)**: A unique identifier for the deployment of the model. +- **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. 
+If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +- **`priority` (Optional, Enum("normal" \| "low"))**: The deployment priority. +- **`queue_capacity` (Optional, number)**: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds +this value, new requests are rejected with a 429 error. +- **`threads_per_allocation` (Optional, number)**: Sets the number of threads used by each model allocation during inference. This generally increases +the inference speed. The inference process is a compute-bound process; any number +greater than the number of available hardware threads on the machine does not increase the +inference speed. If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the model to deploy. +- **`wait_for` (Optional, Enum("started" \| "starting" \| "fully_allocated"))**: Specifies the allocation status to wait for before returning. + +## client.ml.stopDataFrameAnalytics [_ml.stop_data_frame_analytics] +Stop data frame analytics jobs. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics) + +```ts +client.ml.stopDataFrameAnalytics({ id }) +``` + +### Arguments [_arguments_ml.stop_data_frame_analytics] + +#### Request (object) [_request_ml.stop_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: + +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. + +The default value is true, which returns an empty data_frame_analytics +array when there are no matches and the subset of results when there are +partial matches. If this parameter is false, the request returns a 404 +status code when there are no matches or only partial matches. +- **`force` (Optional, boolean)**: If true, the data frame analytics job is stopped forcefully. +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the amount of time to wait until the data frame analytics job +stops. Defaults to 20 seconds. + +## client.ml.stopDatafeed [_ml.stop_datafeed] +Stop datafeeds. +A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed) + +```ts +client.ml.stopDatafeed({ datafeed_id }) +``` + +### Arguments [_arguments_ml.stop_datafeed] + +#### Request (object) [_request_ml.stop_datafeed] +- **`datafeed_id` (string)**: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated +list of datafeeds or a wildcard expression. 
You can close all datafeeds by using `_all` or by specifying `*` as +the identifier. +- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. +- **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter. +- **`timeout` (Optional, string \| -1 \| 0)**: Refer to the description for the `timeout` query parameter. + +## client.ml.stopTrainedModelDeployment [_ml.stop_trained_model_deployment] +Stop a trained model deployment. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment) + +```ts +client.ml.stopTrainedModelDeployment({ model_id }) +``` + +### Arguments [_arguments_ml.stop_trained_model_deployment] + +#### Request (object) [_request_ml.stop_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; +contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and +there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the request returns a 404 status code when there are no matches or only partial matches. +- **`force` (Optional, boolean)**: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you +restart the model deployment. + +## client.ml.updateDataFrameAnalytics [_ml.update_data_frame_analytics] +Update a data frame analytics job. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics) + +```ts +client.ml.updateDataFrameAnalytics({ id }) +``` + +### Arguments [_arguments_ml.update_data_frame_analytics] + +#### Request (object) [_request_ml.update_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`description` (Optional, string)**: A description of the job. +- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try +to create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. + +## client.ml.updateDatafeed [_ml.update_datafeed] +Update a datafeed. +You must stop and start the datafeed for the changes to be applied. +When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at +the time of the update and runs the query using those same roles. 
If you provide secondary authorization headers, +those credentials are used instead. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed) + +```ts +client.ml.updateDatafeed({ datafeed_id }) +``` + +### Arguments [_arguments_ml.update_datafeed] + +#### Request (object) [_request_ml.update_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. +This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. +It must start and end with alphanumeric characters. +- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only +with low cardinality data. +- **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might search over long time periods, for several months or years. This search is split into time +chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of +these time chunks are calculated; it is an advanced configuration option. +- **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally +search over indices that have already been read in an effort to determine whether any data has subsequently been +added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and +the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time +datafeeds. +- **`frequency` (Optional, string \| -1 \| 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is +either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket +span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are +written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value +must be divisible by the interval of the date histogram aggregation. +- **`indices` (Optional, string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine +learning nodes must have the `remote_cluster_client` role. +- **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search. +- **`job_id` (Optional, string)** +- **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically +stops and closes the associated job after this many real-time searches return no documents. In other words, +it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no +end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an
+Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this
+object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also
+changed. Therefore, the time required to learn might be long and the understandability of the results is
+unpredictable. If you want to make significant changes to the source data, it is recommended that you
+clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one
+when you are satisfied with the results of the job.
+- **`query_delay` (Optional, string \| -1 \| 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might
+not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default
+value is randomly selected between `60s` and `120s`. This randomness improves the query performance
+when there are multiple jobs running on the same node.
+- **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search.
+- **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and return script fields to the datafeed.
+The detector configuration objects in a job can contain functions that use these script fields.
+- **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations.
+The maximum value is the value of `index.max_result_window`.
+- **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the
+`_all` string or when no indices are specified.
+- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines
+whether wildcard expressions match hidden data streams. Supports a list of values.
+- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen.
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored.
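+
+For example, a minimal sketch of widening the query delay on an existing datafeed (the datafeed ID and value are illustrative); stop the datafeed first and restart it for the change to take effect:
+
+```ts
+// Hypothetical example: give Elasticsearch more time to index late data.
+await client.ml.updateDatafeed({
+  datafeed_id: 'datafeed-metrics-job',
+  query_delay: '120s'
+})
+```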
+
+## client.ml.updateFilter [_ml.update_filter]
+Update a filter.
+Updates the description of a filter, adds items, or removes items from the list.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter)
+
+```ts
+client.ml.updateFilter({ filter_id })
+```
+
+### Arguments [_arguments_ml.update_filter]
+
+#### Request (object) [_request_ml.update_filter]
+- **`filter_id` (string)**: A string that uniquely identifies a filter.
+- **`add_items` (Optional, string[])**: The items to add to the filter.
+- **`description` (Optional, string)**: A description for the filter.
+- **`remove_items` (Optional, string[])**: The items to remove from the filter.
+
+## client.ml.updateJob [_ml.update_job]
+Update an anomaly detection job.
+Updates certain properties of an anomaly detection job.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job)
+
+```ts
+client.ml.updateJob({ job_id })
+```
+
+### Arguments [_arguments_ml.update_job]
+
+#### Request (object) [_request_ml.update_job]
+- **`job_id` (string)**: Identifier for the job.
+- **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when
+there is insufficient machine learning node capacity for it to be
+immediately assigned to a node. If `false` and a machine learning node
+with capacity to run the job cannot immediately be found, the open
+anomaly detection jobs API returns an error. However, this is also
+subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this
+option is set to `true`, the open anomaly detection jobs API does not
+return an error and the job waits in the opening state until sufficient
+machine learning node capacity is available.
+- **`analysis_limits` (Optional, { model_memory_limit })**
+- **`background_persist_interval` (Optional, string \| -1 \| 0)**: Advanced configuration option. The time between each periodic persistence
+of the model.
+The default value is a randomized value between 3 and 4 hours, which
+avoids all jobs persisting at exactly the same time. The smallest allowed
+value is 1 hour.
+For very large models (several GB), persistence could take 10-20 minutes,
+so do not set the value too low.
+If the job is open when you make the update, you must stop the datafeed,
+close the job, then reopen the job and restart the datafeed for the
+changes to take effect.
+- **`custom_settings` (Optional, Record)**: Advanced configuration option. Contains custom metadata about the job.
+For example, it can contain custom URL information as shown in Adding
+custom URLs to machine learning results.
+- **`categorization_filters` (Optional, string[])**
+- **`description` (Optional, string)**: A description of the job.
+- **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**
+- **`model_prune_window` (Optional, string \| -1 \| 0)**
+- **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old
+model snapshots for this job. It specifies a period of time (in days)
+after which only the first snapshot per day is retained. This period is
+relative to the timestamp of the most recent snapshot for this job. Valid
+values range from 0 to `model_snapshot_retention_days`. For jobs created
+before version 7.8.0, the default value matches
+`model_snapshot_retention_days`.
+- **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old
+model snapshots for this job.
It specifies the maximum period of time (in +days) that snapshots are retained. This period is relative to the +timestamp of the most recent snapshot for this job. +- **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the +score are applied, as new data is seen. +- **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results +are retained. Age is calculated relative to the timestamp of the latest +bucket result. If this property has a non-null value, once per day at +00:30 (server time), results that are the specified number of days older +than the latest bucket result are deleted from Elasticsearch. The default +value is null, which means all results are retained. +- **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. +- **`detectors` (Optional, { detector_index, description, custom_rules }[])**: An array of detector update objects. +- **`per_partition_categorization` (Optional, { enabled, stop_on_warn })**: Settings related to how categorization interacts with partition fields. + +## client.ml.updateModelSnapshot [_ml.update_model_snapshot] +Update a snapshot. +Updates certain properties of a snapshot. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot) + +```ts +client.ml.updateModelSnapshot({ job_id, snapshot_id }) +``` + +### Arguments [_arguments_ml.update_model_snapshot] + +#### Request (object) [_request_ml.update_model_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: Identifier for the model snapshot. +- **`description` (Optional, string)**: A description of the model snapshot. +- **`retain` (Optional, boolean)**: If `true`, this snapshot will not be deleted during automatic cleanup of +snapshots older than `model_snapshot_retention_days`. However, this +snapshot will be deleted when the job is deleted. + +## client.ml.updateTrainedModelDeployment [_ml.update_trained_model_deployment] +Update a trained model deployment. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment) + +```ts +client.ml.updateTrainedModelDeployment({ model_id }) +``` + +### Arguments [_arguments_ml.update_trained_model_deployment] + +#### Request (object) [_request_ml.update_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. +- **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. +If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +- **`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })**: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. 
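+
+For example, a minimal sketch of scaling a deployment (the model ID and allocation count are illustrative):
+
+```ts
+// Hypothetical example: raise the allocation count for a deployed model.
+await client.ml.updateTrainedModelDeployment({
+  model_id: 'my-trained-model',
+  number_of_allocations: 2
+})
+```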
+ +## client.ml.upgradeJobSnapshot [_ml.upgrade_job_snapshot] +Upgrade a snapshot. +Upgrade an anomaly detection model snapshot to the latest major version. +Over time, older snapshot formats are deprecated and removed. Anomaly +detection jobs support only snapshots that are from the current or previous +major version. +This API provides a means to upgrade a snapshot to the current major version. +This aids in preparing the cluster for an upgrade to the next major version. +Only one snapshot per anomaly detection job can be upgraded at a time and the +upgraded snapshot cannot be the current snapshot of the anomaly detection +job. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot) + +```ts +client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) +``` + +### Arguments [_arguments_ml.upgrade_job_snapshot] + +#### Request (object) [_request_ml.upgrade_job_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. +- **`wait_for_completion` (Optional, boolean)**: When true, the API won’t respond until the upgrade is complete. +Otherwise, it responds as soon as the upgrade task is assigned to a node. +- **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the request to complete. + +## client.nodes.clearRepositoriesMeteringArchive [_nodes.clear_repositories_metering_archive] +Clear the archived repositories metering. +Clear the archived repositories metering information in the cluster. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive) + +```ts +client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) +``` + +### Arguments [_arguments_nodes.clear_repositories_metering_archive] + +#### Request (object) [_request_nodes.clear_repositories_metering_archive] +- **`node_id` (string \| string[])**: List of node IDs or names used to limit returned information. +- **`max_archive_version` (number)**: Specifies the maximum `archive_version` to be cleared from the archive. + +## client.nodes.getRepositoriesMeteringInfo [_nodes.get_repositories_metering_info] +Get cluster repositories metering. +Get repositories metering information for a cluster. +This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. +Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info) + +```ts +client.nodes.getRepositoriesMeteringInfo({ node_id }) +``` + +### Arguments [_arguments_nodes.get_repositories_metering_info] + +#### Request (object) [_request_nodes.get_repositories_metering_info] +- **`node_id` (string \| string[])**: List of node IDs or names used to limit returned information. + +## client.nodes.hotThreads [_nodes.hot_threads] +Get the hot threads for nodes. +Get a breakdown of the hot threads on each selected node in the cluster. +The output is plain text with a breakdown of the top hot threads for each node. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads) + +```ts +client.nodes.hotThreads({ ... 
}) +``` + +### Arguments [_arguments_nodes.hot_threads] + +#### Request (object) [_request_nodes.hot_threads] +- **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. +- **`ignore_idle_threads` (Optional, boolean)**: If true, known idle threads (e.g. waiting in a socket select, or to get +a task from an empty queue) are filtered out. +- **`interval` (Optional, string \| -1 \| 0)**: The interval to do the second sampling of threads. +- **`snapshots` (Optional, number)**: Number of samples of thread stacktrace. +- **`threads` (Optional, number)**: Specifies the number of hot threads to provide information for. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received +before the timeout expires, the request fails and returns an error. +- **`type` (Optional, Enum("cpu" \| "wait" \| "block" \| "gpu" \| "mem"))**: The type to sample. +- **`sort` (Optional, Enum("cpu" \| "wait" \| "block" \| "gpu" \| "mem"))**: The sort order for 'cpu' type (default: total) + +## client.nodes.info [_nodes.info] +Get node information. + +By default, the API returns all attributes and core settings for cluster nodes. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info) + +```ts +client.nodes.info({ ... }) +``` + +### Arguments [_arguments_nodes.info] + +#### Request (object) [_request_nodes.info] +- **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. +- **`metric` (Optional, Enum("_all" \| "_none" \| "settings" \| "os" \| "process" \| "jvm" \| "thread_pool" \| "transport" \| "http" \| "remote_cluster_server" \| "plugins" \| "ingest" \| "aggregations" \| "indices") \| Enum("_all" \| "_none" \| "settings" \| "os" \| "process" \| "jvm" \| "thread_pool" \| "transport" \| "http" \| "remote_cluster_server" \| "plugins" \| "ingest" \| "aggregations" \| "indices")[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.nodes.reloadSecureSettings [_nodes.reload_secure_settings] +Reload the keystore on nodes in the cluster. + +Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. +That is, you can change them on disk and reload them without restarting any nodes in the cluster. +When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. + +When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. +Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. +Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings) + +```ts +client.nodes.reloadSecureSettings({ ... 
}) +``` + +### Arguments [_arguments_nodes.reload_secure_settings] + +#### Request (object) [_request_nodes.reload_secure_settings] +- **`node_id` (Optional, string \| string[])**: The names of particular nodes in the cluster to target. +- **`secure_settings_password` (Optional, string)**: The password for the Elasticsearch keystore. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.nodes.stats [_nodes.stats] +Get node statistics. +Get statistics for nodes in a cluster. +By default, all stats are returned. You can limit the returned information by using metrics. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats) + +```ts +client.nodes.stats({ ... }) +``` + +### Arguments [_arguments_nodes.stats] + +#### Request (object) [_request_nodes.stats] +- **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. +- **`metric` (Optional, Enum("_all" \| "_none" \| "indices" \| "os" \| "process" \| "jvm" \| "thread_pool" \| "fs" \| "transport" \| "http" \| "breaker" \| "script" \| "discovery" \| "ingest" \| "adaptive_selection" \| "script_cache" \| "indexing_pressure" \| "repositories" \| "allocations") \| Enum("_all" \| "_none" \| "indices" \| "os" \| "process" \| "jvm" \| "thread_pool" \| "fs" \| "transport" \| "http" \| "breaker" \| "script" \| "discovery" \| "ingest" \| "adaptive_selection" \| "script_cache" \| "indexing_pressure" \| "repositories" \| "allocations")[])**: Limit the information returned to the specified metrics +- **`index_metric` (Optional, Enum("_all" \| "store" \| "indexing" \| "get" \| "search" \| "merge" \| "flush" \| "refresh" \| "query_cache" \| "fielddata" \| "docs" \| "warmer" \| "completion" \| "segments" \| "translog" \| "request_cache" \| "recovery" \| "bulk" \| "shard_stats" \| "mappings" \| "dense_vector" \| "sparse_vector") \| Enum("_all" \| "store" \| "indexing" \| "get" \| "search" \| "merge" \| "flush" \| "refresh" \| "query_cache" \| "fielddata" \| "docs" \| "warmer" \| "completion" \| "segments" \| "translog" \| "request_cache" \| "recovery" \| "bulk" \| "shard_stats" \| "mappings" \| "dense_vector" \| "sparse_vector")[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. +- **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. +- **`fielddata_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata statistics. +- **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics. +- **`groups` (Optional, boolean)**: List of search groups to include in the search statistics. +- **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). +- **`level` (Optional, Enum("node" \| "indices" \| "shards"))**: Indicates whether statistics are aggregated at the node, indices, or shards level. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
+- **`types` (Optional, string[])**: A list of document types for the indexing index metric. +- **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory. + +## client.nodes.usage [_nodes.usage] +Get feature usage information. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage) + +```ts +client.nodes.usage({ ... }) +``` + +### Arguments [_arguments_nodes.usage] + +#### Request (object) [_request_nodes.usage] +- **`node_id` (Optional, string \| string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +- **`metric` (Optional, Enum("_all" \| "rest_actions" \| "aggregations") \| Enum("_all" \| "rest_actions" \| "aggregations")[])**: Limits the information returned to the specific metrics. +A list of the following options: `_all`, `rest_actions`, `aggregations`. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.queryRules.deleteRule [_query_rules.delete_rule] +Delete a query rule. +Delete a query rule within a query ruleset. +This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule) + +```ts +client.queryRules.deleteRule({ ruleset_id, rule_id }) +``` + +### Arguments [_arguments_query_rules.delete_rule] + +#### Request (object) [_request_query_rules.delete_rule] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to delete +- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to delete + +## client.queryRules.deleteRuleset [_query_rules.delete_ruleset] +Delete a query ruleset. +Remove a query ruleset and its associated data. +This is a destructive action that is not recoverable. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset) + +```ts +client.queryRules.deleteRuleset({ ruleset_id }) +``` + +### Arguments [_arguments_query_rules.delete_ruleset] + +#### Request (object) [_request_query_rules.delete_ruleset] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset to delete + +## client.queryRules.getRule [_query_rules.get_rule] +Get a query rule. +Get details about a query rule within a query ruleset. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule) + +```ts +client.queryRules.getRule({ ruleset_id, rule_id }) +``` + +### Arguments [_arguments_query_rules.get_rule] + +#### Request (object) [_request_query_rules.get_rule] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to retrieve +- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to retrieve + +## client.queryRules.getRuleset [_query_rules.get_ruleset] +Get a query ruleset. +Get details about a query ruleset. 
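+
+For instance, assuming a ruleset named `my-ruleset` already exists, fetching it might look like:
+
+```ts
+// The ruleset ID is a placeholder.
+const ruleset = await client.queryRules.getRuleset({ ruleset_id: 'my-ruleset' })
+console.log(ruleset.rules)
+```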
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset) + +```ts +client.queryRules.getRuleset({ ruleset_id }) +``` + +### Arguments [_arguments_query_rules.get_ruleset] + +#### Request (object) [_request_query_rules.get_ruleset] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset + +## client.queryRules.listRulesets [_query_rules.list_rulesets] +Get all query rulesets. +Get summarized information about the query rulesets. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets) + +```ts +client.queryRules.listRulesets({ ... }) +``` + +### Arguments [_arguments_query_rules.list_rulesets] + +#### Request (object) [_request_query_rules.list_rulesets] +- **`from` (Optional, number)**: The offset from the first result to fetch. +- **`size` (Optional, number)**: The maximum number of results to retrieve. + +## client.queryRules.putRule [_query_rules.put_rule] +Create or update a query rule. +Create or update a query rule within a query ruleset. + +IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule. +It is advised to use one or the other in query rulesets to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule) + +```ts +client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) +``` + +### Arguments [_arguments_query_rules.put_rule] + +#### Request (object) [_request_query_rules.put_rule] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to be created or updated. +- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to be created or updated. +- **`type` (Enum("pinned" \| "exclude"))**: The type of rule. +- **`criteria` ({ type, metadata, values } \| { type, metadata, values }[])**: The criteria that must be met for the rule to be applied. +If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. +- **`actions` ({ ids, docs })**: The actions to take when the rule is matched. +The format of this action depends on the rule type. +- **`priority` (Optional, number)** + +## client.queryRules.putRuleset [_query_rules.put_ruleset] +Create or update a query ruleset. +There is a limit of 100 rules per ruleset. +This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. + +IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule. +It is advised to use one or the other in query rulesets to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
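+
+As an illustration, a sketch of a one-rule ruleset that pins documents by ID (the ruleset, rule, metadata key, and document IDs are all placeholders):
+
+```ts
+const response = await client.queryRules.putRuleset({
+  ruleset_id: 'my-ruleset',
+  rules: [
+    {
+      rule_id: 'promote-docs',
+      type: 'pinned',
+      criteria: [
+        // Matches when the supplied user_query criterion equals 'pugs'.
+        { type: 'exact', metadata: 'user_query', values: ['pugs'] }
+      ],
+      // Pins by ids only; ids and docs cannot be mixed in a single rule.
+      actions: { ids: ['id1', 'id2'] }
+    }
+  ]
+})
+```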
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset) + +```ts +client.queryRules.putRuleset({ ruleset_id, rules }) +``` + +### Arguments [_arguments_query_rules.put_ruleset] + +#### Request (object) [_request_query_rules.put_ruleset] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated. +- **`rules` ({ rule_id, type, criteria, actions, priority } \| { rule_id, type, criteria, actions, priority }[])** + +## client.queryRules.test [_query_rules.test] +Test a query ruleset. +Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test) + +```ts +client.queryRules.test({ ruleset_id, match_criteria }) +``` + +### Arguments [_arguments_query_rules.test] + +#### Request (object) [_request_query_rules.test] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset to be tested +- **`match_criteria` (Record)**: The match criteria to apply to rules in the given query ruleset. +Match criteria should match the keys defined in the `criteria.metadata` field of the rule. + +## client.rollup.deleteJob [_rollup.delete_job] +Delete a rollup job. + +A job must be stopped before it can be deleted. +If you attempt to delete a started job, an error occurs. +Similarly, if you attempt to delete a nonexistent job, an exception occurs. + +IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. +The API does not delete any previously rolled up data. +This is by design; a user may wish to roll up a static data set. +Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). +Thus the job can be deleted, leaving behind the rolled up data for analysis. +If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. +If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: + +``` +POST my_rollup_index/_delete_by_query +{ + "query": { + "term": { + "_rollup.id": "the_rollup_job_id" + } + } +} +``` + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job) + +```ts +client.rollup.deleteJob({ id }) +``` + +### Arguments [_arguments_rollup.delete_job] + +#### Request (object) [_request_rollup.delete_job] +- **`id` (string)**: Identifier for the job. + +## client.rollup.getJobs [_rollup.get_jobs] +Get rollup job information. +Get the configuration, stats, and status of rollup jobs. + +NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +If a job was created, ran for a while, then was deleted, the API does not return any details about it. +For details about a historical rollup job, the rollup capabilities API may be more useful. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs) + +```ts +client.rollup.getJobs({ ... }) +``` + +### Arguments [_arguments_rollup.get_jobs] + +#### Request (object) [_request_rollup.get_jobs] +- **`id` (Optional, string)**: Identifier for the rollup job. +If it is `_all` or omitted, the API returns all rollup jobs.
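+
+For example, listing every active rollup job (omitting `id` behaves like `_all`):
+
+```ts
+const { jobs } = await client.rollup.getJobs()
+for (const job of jobs) {
+  console.log(job.config.id, job.status.job_state)
+}
+```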
+ +## client.rollup.getRollupCaps [_rollup.get_rollup_caps] +Get the rollup job capabilities. +Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. + +This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. +Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. +This API enables you to inspect an index and determine: + +1. Does this index have associated rollup data somewhere in the cluster? +2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps) + +```ts +client.rollup.getRollupCaps({ ... }) +``` + +### Arguments [_arguments_rollup.get_rollup_caps] + +#### Request (object) [_request_rollup.get_rollup_caps] +- **`id` (Optional, string)**: Index, indices or index-pattern to return rollup capabilities for. +`_all` may be used to fetch rollup capabilities from all jobs. + +## client.rollup.getRollupIndexCaps [_rollup.get_rollup_index_caps] +Get the rollup index capabilities. +Get the rollup capabilities of all jobs inside of a rollup index. +A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: + +* What jobs are stored in an index (or indices specified via a pattern)? +* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps) + +```ts +client.rollup.getRollupIndexCaps({ index }) +``` + +### Arguments [_arguments_rollup.get_rollup_index_caps] + +#### Request (object) [_request_rollup.get_rollup_index_caps] +- **`index` (string \| string[])**: Data stream or index to check for rollup capabilities. +Wildcard (`*`) expressions are supported. + +## client.rollup.putJob [_rollup.put_job] +Create a rollup job. + +WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. + +The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. + +There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. + +Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job) + +```ts +client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index }) +``` + +### Arguments [_arguments_rollup.put_job] + +#### Request (object) [_request_rollup.put_job] +- **`id` (string)**: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the +data that is associated with the rollup job. The ID is persistent; it is stored with the rolled +up data. 
If you create a job, let it run for a while, then delete the job, the data that the job +rolled up is still associated with this job ID. You cannot create a new job with the same ID +since that could lead to problems with mismatched job configurations. +- **`cron` (string)**: A cron string which defines the intervals when the rollup job should be executed. When the interval +triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated +to the time interval of the data being rolled up. For example, you may wish to create hourly rollups +of your documents but to only run the indexer on a daily basis at midnight, as defined by the cron. The +cron pattern is defined just like a Watcher cron schedule. +- **`groups` ({ date_histogram, histogram, terms })**: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be +available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of +the groups configuration as defining a set of tools that can later be used in aggregations to partition the +data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide +enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. +- **`index_pattern` (string)**: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to +rollup the entire index or index-pattern. +- **`page_size` (number)**: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends +to execute faster, but requires more memory during processing. This value has no effect on how the data is +rolled up; it is merely used for tweaking the speed or memory cost of the indexer. +- **`rollup_index` (string)**: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. +- **`metrics` (Optional, { field, metrics }[])**: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each +group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined +on a per-field basis and for each field you configure which metric should be collected. +- **`timeout` (Optional, string \| -1 \| 0)**: Time to wait for the request to complete. +- **`headers` (Optional, Record)** + +## client.rollup.rollupSearch [_rollup.rollup_search] +Search rolled-up data. +The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. +It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. + +The request body supports a subset of features from the regular search API. +The following functionality is not available: + +`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. +`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. + +For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.
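+
+As a sketch (the rollup index and field names are placeholders), note that `size` stays at zero because only aggregations can be answered from rolled-up data:
+
+```ts
+const response = await client.rollup.rollupSearch({
+  index: 'sensor_rollup',
+  size: 0,
+  aggregations: {
+    max_temperature: { max: { field: 'temperature' } }
+  }
+})
+```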
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search) + +```ts +client.rollup.rollupSearch({ index }) +``` + +### Arguments [_arguments_rollup.rollup_search] + +#### Request (object) [_request_rollup.rollup_search] +- **`index` (string \| string[])**: A list of data streams and indices used to limit the request. +This parameter has the following rules: + +* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. +* Multiple non-rollup indices may be specified. +* Only one rollup index may be specified. If more than one are supplied, an exception occurs. +* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. +- **`aggregations` (Optional, Record)**: Specifies aggregations. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specifies a DSL query that is subject to some limitations. +- **`size` (Optional, number)**: Must be zero if set, as rollups work on pre-aggregated data. +- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response + +## client.rollup.startJob [_rollup.start_job] +Start rollup jobs. +If you try to start a job that does not exist, an exception occurs. +If you try to start a job that is already started, nothing happens. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job) + +```ts +client.rollup.startJob({ id }) +``` + +### Arguments [_arguments_rollup.start_job] + +#### Request (object) [_request_rollup.start_job] +- **`id` (string)**: Identifier for the rollup job. + +## client.rollup.stopJob [_rollup.stop_job] +Stop rollup jobs. +If you try to stop a job that does not exist, an exception occurs. +If you try to stop a job that is already stopped, nothing happens. + +Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. +This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: + +``` +POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +``` +The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. +If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. 
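+
+The client equivalent of that REST call would look roughly like this (`sensor` is the same placeholder job ID):
+
+```ts
+// Blocks for up to 10 seconds while the indexer winds down.
+await client.rollup.stopJob({
+  id: 'sensor',
+  wait_for_completion: true,
+  timeout: '10s'
+})
+```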
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job) + +```ts +client.rollup.stopJob({ id }) +``` + +### Arguments [_arguments_rollup.stop_job] + +#### Request (object) [_request_rollup.stop_job] +- **`id` (string)**: Identifier for the rollup job. +- **`timeout` (Optional, string \| -1 \| 0)**: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. +If more than `timeout` time has passed, the API throws a timeout exception. +NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. +The timeout simply means the API call itself timed out while waiting for the status change. +- **`wait_for_completion` (Optional, boolean)**: If set to `true`, causes the API to block until the indexer state completely stops. +If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. + +## client.searchApplication.delete [_search_application.delete] +Delete a search application. + +Remove a search application and its associated alias. Indices attached to the search application are not removed. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete) + +```ts +client.searchApplication.delete({ name }) +``` + +### Arguments [_arguments_search_application.delete] + +#### Request (object) [_request_search_application.delete] +- **`name` (string)**: The name of the search application to delete. + +## client.searchApplication.deleteBehavioralAnalytics [_search_application.delete_behavioral_analytics] +Delete a behavioral analytics collection. +The associated data stream is also deleted. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics) + +```ts +client.searchApplication.deleteBehavioralAnalytics({ name }) +``` + +### Arguments [_arguments_search_application.delete_behavioral_analytics] + +#### Request (object) [_request_search_application.delete_behavioral_analytics] +- **`name` (string)**: The name of the analytics collection to be deleted + +## client.searchApplication.get [_search_application.get] +Get search application details. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get) + +```ts +client.searchApplication.get({ name }) +``` + +### Arguments [_arguments_search_application.get] + +#### Request (object) [_request_search_application.get] +- **`name` (string)**: The name of the search application + +## client.searchApplication.getBehavioralAnalytics [_search_application.get_behavioral_analytics] +Get behavioral analytics collections. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics) + +```ts +client.searchApplication.getBehavioralAnalytics({ ... }) +``` + +### Arguments [_arguments_search_application.get_behavioral_analytics] + +#### Request (object) [_request_search_application.get_behavioral_analytics] +- **`name` (Optional, string[])**: A list of analytics collections to limit the returned information + +## client.searchApplication.list [_search_application.list] +Get search applications. +Get information about search applications. 
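+
+For example, paging through search applications whose names match a query string (the values are illustrative):
+
+```ts
+const response = await client.searchApplication.list({
+  q: 'web*',
+  from: 0,
+  size: 10
+})
+```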
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list) + +```ts +client.searchApplication.list({ ... }) +``` + +### Arguments [_arguments_search_application.list] + +#### Request (object) [_request_search_application.list] +- **`q` (Optional, string)**: Query in the Lucene query string syntax. +- **`from` (Optional, number)**: Starting offset. +- **`size` (Optional, number)**: Specifies a max number of results to get. + +## client.searchApplication.postBehavioralAnalyticsEvent [_search_application.post_behavioral_analytics_event] +Create a behavioral analytics collection event. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event) + +```ts +client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type }) +``` + +### Arguments [_arguments_search_application.post_behavioral_analytics_event] + +#### Request (object) [_request_search_application.post_behavioral_analytics_event] +- **`collection_name` (string)**: The name of the behavioral analytics collection. +- **`event_type` (Enum("page_view" \| "search" \| "search_click"))**: The analytics event type. +- **`payload` (Optional, User-defined value)** +- **`debug` (Optional, boolean)**: Whether the response should include more details + +## client.searchApplication.put [_search_application.put] +Create or update a search application. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put) + +```ts +client.searchApplication.put({ name }) +``` + +### Arguments [_arguments_search_application.put] + +#### Request (object) [_request_search_application.put] +- **`name` (string)**: The name of the search application to be created or updated. +- **`search_application` (Optional, { indices, analytics_collection_name, template })** +- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing Search Applications. + +## client.searchApplication.putBehavioralAnalytics [_search_application.put_behavioral_analytics] +Create a behavioral analytics collection. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics) + +```ts +client.searchApplication.putBehavioralAnalytics({ name }) +``` + +### Arguments [_arguments_search_application.put_behavioral_analytics] + +#### Request (object) [_request_search_application.put_behavioral_analytics] +- **`name` (string)**: The name of the analytics collection to be created or updated. + +## client.searchApplication.renderQuery [_search_application.render_query] +Render a search application query. +Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. +If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. +The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. + +You must have `read` privileges on the backing alias of the search application.
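+
+As a hedged sketch (the application name and template parameter are placeholders that depend on your search template):
+
+```ts
+// Returns the Elasticsearch query the search application would run.
+const rendered = await client.searchApplication.renderQuery({
+  name: 'my-search-app',
+  params: { query_string: 'rock climbing' }
+})
+```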
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query) + +```ts +client.searchApplication.renderQuery({ name }) +``` + +### Arguments [_arguments_search_application.render_query] + +#### Request (object) [_request_search_application.render_query] +- **`name` (string)**: The name of the search application to render the query for. +- **`params` (Optional, Record)** + +## client.searchApplication.search [_search_application.search] +Run a search application search. +Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template. +Unspecified template parameters are assigned their default values if applicable. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search) + +```ts +client.searchApplication.search({ name }) +``` + +### Arguments [_arguments_search_application.search] + +#### Request (object) [_request_search_application.search] +- **`name` (string)**: The name of the search application to be searched. +- **`params` (Optional, Record)**: Query parameters specific to this request, which will override any defaults specified in the template. +- **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. + +## client.searchableSnapshots.cacheStats [_searchable_snapshots.cache_stats] +Get cache statistics. +Get statistics about the shared cache for partially mounted indices. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats) + +```ts +client.searchableSnapshots.cacheStats({ ... }) +``` + +### Arguments [_arguments_searchable_snapshots.cache_stats] + +#### Request (object) [_request_searchable_snapshots.cache_stats] +- **`node_id` (Optional, string \| string[])**: The names of the nodes in the cluster to target. +- **`master_timeout` (Optional, string \| -1 \| 0)** + +## client.searchableSnapshots.clearCache [_searchable_snapshots.clear_cache] +Clear the cache. +Clear indices and data streams from the shared cache for partially mounted indices. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache) + +```ts +client.searchableSnapshots.clearCache({ ... }) +``` + +### Arguments [_arguments_searchable_snapshots.clear_cache] + +#### Request (object) [_request_searchable_snapshots.clear_cache] +- **`index` (Optional, string \| string[])**: A list of data streams, indices, and aliases to clear from the cache. +It supports wildcards (`*`). +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) + +## client.searchableSnapshots.mount [_searchable_snapshots.mount] +Mount a snapshot. +Mount a snapshot as a searchable snapshot index. +Do not use this API for snapshots managed by index lifecycle management (ILM).
+Manually mounting ILM-managed snapshots can interfere with ILM processes. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount) + +```ts +client.searchableSnapshots.mount({ repository, snapshot, index }) +``` + +### Arguments [_arguments_searchable_snapshots.mount] + +#### Request (object) [_request_searchable_snapshots.mount] +- **`repository` (string)**: The name of the repository containing the snapshot of the index to mount. +- **`snapshot` (string)**: The name of the snapshot of the index to mount. +- **`index` (string)**: The name of the index contained in the snapshot whose data is to be mounted. +If no `renamed_index` is specified, this name will also be used to create the new index. +- **`renamed_index` (Optional, string)**: The name of the index that will be created. +- **`index_settings` (Optional, Record)**: The settings that should be added to the index when it is mounted. +- **`ignore_index_settings` (Optional, string[])**: The names of settings that should be removed from the index when it is mounted. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never time out, set it to `-1`. +- **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until the operation is complete. +- **`storage` (Optional, string)**: The mount option for the searchable snapshot index. + +## client.searchableSnapshots.stats [_searchable_snapshots.stats] +Get searchable snapshot statistics. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats) + +```ts +client.searchableSnapshots.stats({ ... }) +``` + +### Arguments [_arguments_searchable_snapshots.stats] + +#### Request (object) [_request_searchable_snapshots.stats] +- **`index` (Optional, string \| string[])**: A list of data streams and indices to retrieve statistics for. +- **`level` (Optional, Enum("cluster" \| "indices" \| "shards"))**: Return stats aggregated at cluster, index or shard level + +## client.security.activateUserProfile [_security.activate_user_profile] +Activate a user profile. + +Create or update a user profile on behalf of another user. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name`, `roles`, and the authentication realm. +For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. + +When updating a profile document, the API enables the document if it was disabled. +Any updates do not change existing content for either the `labels` or `data` fields.
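+
+For example, a minimal sketch using the `password` grant type (the credentials are placeholders; as noted above, this API is intended for solutions like Kibana rather than end users):
+
+```ts
+const profile = await client.security.activateUserProfile({
+  grant_type: 'password',
+  username: 'jacknich',
+  password: 'l0ng-r4nd0m-p@ssw0rd'
+})
+// profile.uid is the stable user profile ID for this user.
+```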
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile) + +```ts +client.security.activateUserProfile({ grant_type }) +``` + +### Arguments [_arguments_security.activate_user_profile] + +#### Request (object) [_request_security.activate_user_profile] +- **`grant_type` (Enum("password" \| "access_token"))**: The type of grant. +- **`access_token` (Optional, string)**: The user's Elasticsearch access token or JWT. +Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. +If you specify the `access_token` grant type, this parameter is required. +It is not valid with other grant types. +- **`password` (Optional, string)**: The user's password. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +- **`username` (Optional, string)**: The username that identifies the user. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. + +## client.security.authenticate [_security.authenticate] +Authenticate a user. + +Authenticates a user and returns information about the authenticated user. +Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). +A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. +If the user cannot be authenticated, this API returns a 401 status code. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate) + +```ts +client.security.authenticate() +``` + + +## client.security.bulkDeleteRole [_security.bulk_delete_role] +Bulk delete roles. + +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The bulk delete roles API cannot delete roles that are defined in roles files. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role) + +```ts +client.security.bulkDeleteRole({ names }) +``` + +### Arguments [_arguments_security.bulk_delete_role] + +#### Request (object) [_request_security.bulk_delete_role] +- **`names` (string[])**: An array of role names to delete +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.bulkPutRole [_security.bulk_put_role] +Bulk create or update roles. + +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The bulk create or update roles API cannot update roles that are defined in roles files. 
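+
+For instance, creating two roles in one call (the role names and privileges are illustrative):
+
+```ts
+const response = await client.security.bulkPutRole({
+  roles: {
+    logs_reader: {
+      indices: [{ names: ['logs-*'], privileges: ['read'] }]
+    },
+    cluster_monitor: {
+      cluster: ['monitor']
+    }
+  }
+})
+```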
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role) + +```ts +client.security.bulkPutRole({ roles }) +``` + +### Arguments [_arguments_security.bulk_put_role] + +#### Request (object) [_request_security.bulk_put_role] +- **`roles` (Record)**: A dictionary of role name to RoleDescriptor objects to add or update +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.bulkUpdateApiKeys [_security.bulk_update_api_keys] +Bulk update API keys. +Update the attributes for multiple API keys. + +IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. + +This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. + +It is not possible to update expired or invalidated API keys. + +This API supports updates to API key access scope, metadata and expiration. +The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. + +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. + +A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys) + +```ts +client.security.bulkUpdateApiKeys({ ids }) +``` + +### Arguments [_arguments_security.bulk_update_api_keys] + +#### Request (object) [_request_security.bulk_update_api_keys] +- **`ids` (string \| string[])**: The API key identifiers. +- **`expiration` (Optional, string \| -1 \| 0)**: Expiration time for the API keys. +By default, API keys never expire. +This property can be omitted to leave the value unchanged. +- **`metadata` (Optional, Record)**: Arbitrary nested metadata to associate with the API keys. +Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. +Any information specified with this parameter fully replaces metadata previously associated with the API key. +- **`role_descriptors` (Optional, Record)**: The role descriptors to assign to the API keys. +An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether or not you supply the `role_descriptors` parameter.
+The structure of a role descriptor is the same as the request for the create API keys API. + +## client.security.changePassword [_security.change_password] +Change passwords. + +Change the passwords of users in the native realm and built-in users. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password) + +```ts +client.security.changePassword({ ... }) +``` + +### Arguments [_arguments_security.change_password] + +#### Request (object) [_request_security.change_password] +- **`username` (Optional, string)**: The user whose password you want to change. If you do not specify this +parameter, the password is changed for the current user. +- **`password` (Optional, string)**: The new password value. Passwords must be at least 6 characters long. +- **`password_hash` (Optional, string)**: A hash of the new password value. This must be produced using the same +hashing algorithm as has been configured for password storage. For more details, +see the explanation of the `xpack.security.authc.password_hashing.algorithm` +setting. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.clearApiKeyCache [_security.clear_api_key_cache] +Clear the API key cache. + +Evict a subset of all entries from the API key cache. +The cache is also automatically cleared on state changes of the security index. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache) + +```ts +client.security.clearApiKeyCache({ ids }) +``` + +### Arguments [_arguments_security.clear_api_key_cache] + +#### Request (object) [_request_security.clear_api_key_cache] +- **`ids` (string \| string[])**: List of API key IDs to evict from the API key cache. +To evict all API keys, use `*`. +Does not support other wildcard patterns. + +## client.security.clearCachedPrivileges [_security.clear_cached_privileges] +Clear the privileges cache. + +Evict privileges from the native application privilege cache. +The cache is also automatically cleared for applications that have their privileges updated. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges) + +```ts +client.security.clearCachedPrivileges({ application }) +``` + +### Arguments [_arguments_security.clear_cached_privileges] + +#### Request (object) [_request_security.clear_cached_privileges] +- **`application` (string \| string[])**: A list of applications. +To clear all applications, use an asterisk (`*`). +It does not support other wildcard patterns. + +## client.security.clearCachedRealms [_security.clear_cached_realms] +Clear the user cache. + +Evict users from the user cache. +You can completely clear the cache or evict specific users. + +User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. +There are realm settings that you can use to configure the user cache. +For more information, refer to the documentation about controlling the user cache.
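+
+For example, evicting a single user from one realm's cache (the realm and username are placeholders):
+
+```ts
+await client.security.clearCachedRealms({
+  realms: 'native1',
+  usernames: ['myuser']
+})
+```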
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms) + +```ts +client.security.clearCachedRealms({ realms }) +``` + +### Arguments [_arguments_security.clear_cached_realms] + +#### Request (object) [_request_security.clear_cached_realms] +- **`realms` (string \| string[])**: A list of realms. +To clear all realms, use an asterisk (`*`). +It does not support other wildcard patterns. +- **`usernames` (Optional, string[])**: A list of the users to clear from the cache. +If you do not specify this parameter, the API evicts all users from the user cache. + +## client.security.clearCachedRoles [_security.clear_cached_roles] +Clear the roles cache. + +Evict roles from the native role cache. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles) + +```ts +client.security.clearCachedRoles({ name }) +``` + +### Arguments [_arguments_security.clear_cached_roles] + +#### Request (object) [_request_security.clear_cached_roles] +- **`name` (string \| string[])**: A list of roles to evict from the role cache. +To evict all roles, use an asterisk (`*`). +It does not support other wildcard patterns. + +## client.security.clearCachedServiceTokens [_security.clear_cached_service_tokens] +Clear service account token caches. + +Evict a subset of all entries from the service account token caches. +Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. +This API clears matching entries from both caches. + +The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. +The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens) + +```ts +client.security.clearCachedServiceTokens({ namespace, service, name }) +``` + +### Arguments [_arguments_security.clear_cached_service_tokens] + +#### Request (object) [_request_security.clear_cached_service_tokens] +- **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. +- **`service` (string)**: The name of the service, which must be unique within its namespace. +- **`name` (string \| string[])**: A list of token names to evict from the service account token caches. +Use a wildcard (`*`) to evict all tokens that belong to a service account. +It does not support other wildcard patterns. + +## client.security.createApiKey [_security.create_api_key] +Create an API key. + +Create an API key for access without requiring basic authentication. + +IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. +If you specify privileges, the API returns an error. + +A successful request returns a JSON structure that contains the API key, its unique id, and its name. +If applicable, it also returns expiration information for the API key in milliseconds. + +NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. + +The API keys are created by the Elasticsearch API key service, which is automatically enabled. +To configure or turn off the API key service, refer to API key service setting documentation. 
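+
+As a sketch, creating a key that expires in a day and is restricted to reading one index pattern (all names are placeholders):
+
+```ts
+const key = await client.security.createApiKey({
+  name: 'ingest-key',
+  expiration: '1d',
+  role_descriptors: {
+    logs_read_only: {
+      indices: [{ names: ['logs-*'], privileges: ['read'] }]
+    }
+  },
+  metadata: { application: 'my-ingest-service' }
+})
+// key.encoded can be used directly in an `Authorization: ApiKey ...` header.
+```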
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) + +```ts +client.security.createApiKey({ ... }) +``` + +### Arguments [_arguments_security.create_api_key] + +#### Request (object) [_request_security.create_api_key] +- **`expiration` (Optional, string \| -1 \| 0)**: The expiration time for the API key. +By default, API keys never expire. +- **`name` (Optional, string)**: A name for the API key. +- **`role_descriptors` (Optional, Record)**: An array of role descriptors for this API key. +When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. +If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. +The structure of role descriptor is the same as the request for the create role API. +For more details, refer to the create or update roles API. + +NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. +In this case, you must explicitly specify a role descriptor with no privileges. +The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.createCrossClusterApiKey [_security.create_cross_cluster_api_key] +Create a cross-cluster API key. + +Create an API key of the `cross_cluster` type for the API key based remote cluster access. +A `cross_cluster` API key cannot be used to authenticate through the REST interface. + +IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. + +Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. + +NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. + +A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. + +By default, API keys never expire. You can specify expiration information when you create the API keys. + +Cross-cluster API keys can only be updated with the update cross-cluster API key API. +Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. 
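+
+For example, a sketch granting search-only access to a couple of index patterns (the key name and patterns are placeholders):
+
+```ts
+const response = await client.security.createCrossClusterApiKey({
+  name: 'remote-search-key',
+  access: {
+    // Search access only; replication could be specified alongside it.
+    search: [{ names: ['logs-*', 'metrics-*'] }]
+  },
+  expiration: '30d'
+})
+```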
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key)
+
+```ts
+client.security.createCrossClusterApiKey({ access, name })
+```
+
+### Arguments [_arguments_security.create_cross_cluster_api_key]
+
+#### Request (object) [_request_security.create_cross_cluster_api_key]
+- **`access` ({ replication, search })**: The access to be granted to this API key.
+The access is composed of permissions for cross-cluster search and cross-cluster replication.
+At least one of them must be specified.
+
+NOTE: No explicit privileges should be specified for either search or replication access.
+The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly.
+- **`name` (string)**: Specifies the name for this API key.
+- **`expiration` (Optional, string \| -1 \| 0)**: Expiration time for the API key.
+By default, API keys never expire.
+- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key.
+It supports a nested data structure.
+Within the metadata object, keys beginning with `_` are reserved for system usage.
+- **`certificate_identity` (Optional, string)**: The certificate identity to associate with this API key.
+This field is used to restrict the API key to connections authenticated by a specific TLS certificate.
+The value should match the certificate's distinguished name (DN) pattern.
+
+## client.security.createServiceToken [_security.create_service_token]
+Create a service account token.
+
+Create a service account token for access without requiring basic authentication.
+
+NOTE: Service account tokens never expire.
+You must actively delete them if they are no longer needed.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token)
+
+```ts
+client.security.createServiceToken({ namespace, service })
+```
+
+### Arguments [_arguments_security.create_service_token]
+
+#### Request (object) [_request_security.create_service_token]
+- **`namespace` (string)**: The name of the namespace, which is a top-level grouping of service accounts.
+- **`service` (string)**: The name of the service.
+- **`name` (Optional, string)**: The name for the service account token.
+If omitted, a random name will be generated.
+
+Token names must be at least one character and no more than 256 characters.
+They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore.
+
+NOTE: Token names must be unique in the context of the associated service account.
+They must also be globally unique with their fully qualified names, which are composed of the service account principal and token name, such as `//`.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.delegatePki [_security.delegate_pki]
+Delegate PKI authentication.
+
+This API implements the exchange of an X509Certificate chain for an Elasticsearch access token.
+The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`.
+A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm.
+
+This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.
+
+IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated.
+This is part of the TLS authentication process and it is delegated to the proxy that calls this API.
+The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki)
+
+```ts
+client.security.delegatePki({ x509_certificate_chain })
+```
+
+### Arguments [_arguments_security.delegate_pki]
+
+#### Request (object) [_request_security.delegate_pki]
+- **`x509_certificate_chain` (string[])**: The X509Certificate chain, which is represented as an ordered string array.
+Each string in the array is a base64-encoded string (Section 4 of RFC 4648, not base64url-encoded) of the certificate's DER encoding.
+
+The first element is the target certificate that contains the subject distinguished name that is requesting access.
+This may be followed by additional certificates; each subsequent certificate is used to certify the previous one.
+
+## client.security.deletePrivileges [_security.delete_privileges]
+Delete application privileges.
+
+To use this API, you must have one of the following privileges:
+
+* The `manage_security` cluster privilege (or a greater privilege such as `all`).
+* The "Manage Application Privileges" global privilege for the application being referenced in the request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges)
+
+```ts
+client.security.deletePrivileges({ application, name })
+```
+
+### Arguments [_arguments_security.delete_privileges]
+
+#### Request (object) [_request_security.delete_privileges]
+- **`application` (string)**: The name of the application.
+Application privileges are always associated with exactly one application.
+- **`name` (string \| string[])**: The name of the privilege.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.deleteRole [_security.delete_role]
+Delete roles.
+
+Delete roles in the native realm.
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The delete roles API cannot remove roles that are defined in roles files.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role)
+
+```ts
+client.security.deleteRole({ name })
+```
+
+### Arguments [_arguments_security.delete_role]
+
+#### Request (object) [_request_security.delete_role]
+- **`name` (string)**: The name of the role.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.deleteRoleMapping [_security.delete_role_mapping] +Delete role mappings. + +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The delete role mappings API cannot remove role mappings that are defined in role mapping files. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping) + +```ts +client.security.deleteRoleMapping({ name }) +``` + +### Arguments [_arguments_security.delete_role_mapping] + +#### Request (object) [_request_security.delete_role_mapping] +- **`name` (string)**: The distinct name that identifies the role mapping. +The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.deleteServiceToken [_security.delete_service_token] +Delete service account tokens. + +Delete service account tokens for a service in a specified namespace. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token) + +```ts +client.security.deleteServiceToken({ namespace, service, name }) +``` + +### Arguments [_arguments_security.delete_service_token] + +#### Request (object) [_request_security.delete_service_token] +- **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. +- **`service` (string)**: The service name. +- **`name` (string)**: The name of the service account token. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.deleteUser [_security.delete_user] +Delete users. + +Delete users from the native realm. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user) + +```ts +client.security.deleteUser({ username }) +``` + +### Arguments [_arguments_security.delete_user] + +#### Request (object) [_request_security.delete_user] +- **`username` (string)**: An identifier for the user. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.disableUser [_security.disable_user] +Disable users. + +Disable users in the native realm. +By default, when you create users, they are enabled. +You can use this API to revoke a user's access to Elasticsearch. 
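+
+For example, a minimal sketch; the username is an illustrative placeholder:
+
+```ts
+// Disable a native-realm user; the user can be re-enabled later
+// with the enable users API.
+await client.security.disableUser({ username: 'jacknich' })
+```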
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user)
+
+```ts
+client.security.disableUser({ username })
+```
+
+### Arguments [_arguments_security.disable_user]
+
+#### Request (object) [_request_security.disable_user]
+- **`username` (string)**: An identifier for the user.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.disableUserProfile [_security.disable_user_profile]
+Disable a user profile.
+
+Disable user profiles so that they are not visible in user profile searches.
+
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches.
+To re-enable a disabled user profile, use the enable user profile API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile)
+
+```ts
+client.security.disableUserProfile({ uid })
+```
+
+### Arguments [_arguments_security.disable_user_profile]
+
+#### Request (object) [_request_security.disable_user_profile]
+- **`uid` (string)**: Unique identifier for the user profile.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search.
+If 'wait_for', it waits for a refresh to make this operation visible to search.
+If 'false', it does nothing with refreshes.
+
+## client.security.enableUser [_security.enable_user]
+Enable users.
+
+Enable users in the native realm.
+By default, when you create users, they are enabled.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user)
+
+```ts
+client.security.enableUser({ username })
+```
+
+### Arguments [_arguments_security.enable_user]
+
+#### Request (object) [_request_security.enable_user]
+- **`username` (string)**: An identifier for the user.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.enableUserProfile [_security.enable_user_profile]
+Enable a user profile.
+
+Enable user profiles to make them visible in user profile searches.
+
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+When you activate a user profile, it's automatically enabled and visible in user profile searches.
+If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile) + +```ts +client.security.enableUserProfile({ uid }) +``` + +### Arguments [_arguments_security.enable_user_profile] + +#### Request (object) [_request_security.enable_user_profile] +- **`uid` (string)**: A unique identifier for the user profile. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. + +## client.security.enrollKibana [_security.enroll_kibana] +Enroll Kibana. + +Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. + +NOTE: This API is currently intended for internal use only by Kibana. +Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana) + +```ts +client.security.enrollKibana() +``` + + +## client.security.enrollNode [_security.enroll_node] +Enroll a node. + +Enroll a new node to allow it to join an existing cluster with security features enabled. + +The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. +The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node) + +```ts +client.security.enrollNode() +``` + + +## client.security.getApiKey [_security.get_api_key] +Get API key information. + +Retrieves information for one or more API keys. +NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key) + +```ts +client.security.getApiKey({ ... }) +``` + +### Arguments [_arguments_security.get_api_key] + +#### Request (object) [_request_security.get_api_key] +- **`id` (Optional, string)**: An API key id. +This parameter cannot be used with any of `name`, `realm_name` or `username`. +- **`name` (Optional, string)**: An API key name. +This parameter cannot be used with any of `id`, `realm_name` or `username`. +It supports prefix search with wildcard. +- **`owner` (Optional, boolean)**: A boolean flag that can be used to query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. +- **`realm_name` (Optional, string)**: The name of an authentication realm. +This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. +- **`username` (Optional, string)**: The username of a user. 
+This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`.
+- **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors.
+- **`active_only` (Optional, boolean)**: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is `false`, the response will include both active and inactive (expired or invalidated) keys.
+- **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile UID for the API key owner principal, if it exists.
+
+## client.security.getBuiltinPrivileges [_security.get_builtin_privileges]
+Get builtin privileges.
+
+Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges)
+
+```ts
+client.security.getBuiltinPrivileges()
+```
+
+
+## client.security.getPrivileges [_security.get_privileges]
+Get application privileges.
+
+To use this API, you must have one of the following privileges:
+
+* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`).
+* The "Manage Application Privileges" global privilege for the application being referenced in the request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges)
+
+```ts
+client.security.getPrivileges({ ... })
+```
+
+### Arguments [_arguments_security.get_privileges]
+
+#### Request (object) [_request_security.get_privileges]
+- **`application` (Optional, string)**: The name of the application.
+Application privileges are always associated with exactly one application.
+If you do not specify this parameter, the API returns information about all privileges for all applications.
+- **`name` (Optional, string \| string[])**: The name of the privilege.
+If you do not specify this parameter, the API returns information about all privileges for the requested application.
+
+## client.security.getRole [_security.get_role]
+Get roles.
+
+Get roles in the native realm.
+The role management APIs are generally the preferred way to manage roles, rather than using file-based role management.
+The get roles API cannot retrieve roles that are defined in roles files.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role)
+
+```ts
+client.security.getRole({ ... })
+```
+
+### Arguments [_arguments_security.get_role]
+
+#### Request (object) [_request_security.get_role]
+- **`name` (Optional, string \| string[])**: The name of the role.
+You can specify multiple roles as a list.
+If you do not specify this parameter, the API returns information about all roles.
+
+## client.security.getRoleMapping [_security.get_role_mapping]
+Get role mappings.
+
+Role mappings define which roles are assigned to each user.
+The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files.
+The get role mappings API cannot retrieve role mappings that are defined in role mapping files.
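+
+For example, a minimal sketch; the mapping name is an illustrative placeholder:
+
+```ts
+// Fetch a single mapping by name; omit `name` to list all role mappings.
+const mappings = await client.security.getRoleMapping({ name: 'mapping1' })
+console.log(mappings['mapping1']?.roles)
+```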
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping)
+
+```ts
+client.security.getRoleMapping({ ... })
+```
+
+### Arguments [_arguments_security.get_role_mapping]
+
+#### Request (object) [_request_security.get_role_mapping]
+- **`name` (Optional, string \| string[])**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings.
+
+## client.security.getServiceAccounts [_security.get_service_accounts]
+Get service accounts.
+
+Get a list of service accounts that match the provided path parameters.
+
+NOTE: Currently, only the `elastic/fleet-server` service account is available.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts)
+
+```ts
+client.security.getServiceAccounts({ ... })
+```
+
+### Arguments [_arguments_security.get_service_accounts]
+
+#### Request (object) [_request_security.get_service_accounts]
+- **`namespace` (Optional, string)**: The name of the namespace.
+Omit this parameter to retrieve information about all service accounts.
+If you omit this parameter, you must also omit the `service` parameter.
+- **`service` (Optional, string)**: The service name.
+Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`.
+
+## client.security.getServiceCredentials [_security.get_service_credentials]
+Get service account credentials.
+
+To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`).
+
+The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.
+
+NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster.
+Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials)
+
+```ts
+client.security.getServiceCredentials({ namespace, service })
+```
+
+### Arguments [_arguments_security.get_service_credentials]
+
+#### Request (object) [_request_security.get_service_credentials]
+- **`namespace` (string)**: The name of the namespace.
+- **`service` (string)**: The service name.
+
+## client.security.getSettings [_security.get_settings]
+Get security index settings.
+
+Get the user-configurable settings for the security internal index (`.security` and associated indices).
+Only a subset of the index settings (those that are user-configurable) will be shown.
+This includes:
+
+* `index.auto_expand_replicas`
+* `index.number_of_replicas`
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings)
+
+```ts
+client.security.getSettings({ ... })
+```
+
+### Arguments [_arguments_security.get_settings]
+
+#### Request (object) [_request_security.get_settings]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.security.getStats [_security.get_stats]
+Get security stats.
+
+Gather security usage statistics from all nodes within the cluster.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats)
+
+```ts
+client.security.getStats()
+```
+
+
+## client.security.getToken [_security.get_token]
+Get a token.
+
+Create a bearer token for access without requiring basic authentication.
+The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface.
+Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting.
+When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.
+
+The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.
+
+A successful get token API call returns a JSON structure that contains the access token, the number of seconds until the token expires, the type, and the scope if available.
+
+The tokens returned by the get token API have a finite period of time for which they are valid, and after that time period they can no longer be used.
+That time period is defined by the `xpack.security.authc.token.timeout` setting.
+If you want to invalidate a token immediately, you can do so by using the invalidate token API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token)
+
+```ts
+client.security.getToken({ ... })
+```
+
+### Arguments [_arguments_security.get_token]
+
+#### Request (object) [_request_security.get_token]
+- **`grant_type` (Optional, Enum("password" \| "client_credentials" \| "_kerberos" \| "refresh_token"))**: The type of grant.
+Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`.
+- **`scope` (Optional, string)**: The scope of the token.
+Currently, tokens are only issued for a scope of FULL regardless of the value sent with the request.
+- **`password` (Optional, string)**: The user's password.
+If you specify the `password` grant type, this parameter is required.
+This parameter is not valid with any other supported grant type.
+- **`kerberos_ticket` (Optional, string)**: The base64-encoded Kerberos ticket.
+If you specify the `_kerberos` grant type, this parameter is required.
+This parameter is not valid with any other supported grant type.
+- **`refresh_token` (Optional, string)**: The string that was returned when you created the token, which enables you to extend its life.
+If you specify the `refresh_token` grant type, this parameter is required.
+This parameter is not valid with any other supported grant type.
+- **`username` (Optional, string)**: The username that identifies the user.
+If you specify the `password` grant type, this parameter is required.
+This parameter is not valid with any other supported grant type.
+
+## client.security.getUser [_security.get_user]
+Get users.
+
+Get information about users in the native realm and built-in users.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user)
+
+```ts
+client.security.getUser({ ... })
+```
+
+### Arguments [_arguments_security.get_user]
+
+#### Request (object) [_request_security.get_user]
+- **`username` (Optional, string \| string[])**: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users.
+- **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users.
+
+## client.security.getUserPrivileges [_security.get_user_privileges]
+Get user privileges.
+
+Get the security privileges for the logged-in user.
+All users can use this API, but only to determine their own privileges.
+To check the privileges of other users, you must use the run as feature.
+To check whether a user has a specific list of privileges, use the has privileges API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges)
+
+```ts
+client.security.getUserPrivileges()
+```
+
+
+## client.security.getUserProfile [_security.get_user_profile]
+Get a user profile.
+
+Get a user's profile using the unique profile ID.
+
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile)
+
+```ts
+client.security.getUserProfile({ uid })
+```
+
+### Arguments [_arguments_security.get_user_profile]
+
+#### Request (object) [_request_security.get_user_profile]
+- **`uid` (string \| string[])**: A unique identifier for the user profile.
+- **`data` (Optional, string \| string[])**: A list of filters for the `data` field of the profile document.
+To return all content use `data=*`.
+To return a subset of content use `data=` to retrieve content nested under the specified ``.
+By default returns no `data` content.
+
+## client.security.grantApiKey [_security.grant_api_key]
+Grant an API key.
+
+Create an API key on behalf of another user.
+This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API.
+The caller must have authentication credentials for the user on whose behalf the API key will be created.
+It is not possible to use this API to create an API key without that user's credentials.
+The supported user authentication credential types are:
+
+* username and password
+* Elasticsearch access tokens
+* JWTs
+
+The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user.
+In this case, the API key will be created on behalf of the impersonated user.
+
+This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf.
+The API keys are created by the Elasticsearch API key service, which is automatically enabled.
+
+A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name.
+If applicable, it also returns expiration information for the API key in milliseconds.
+
+By default, API keys never expire. You can specify expiration information when you create the API keys.
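+
+For example, a minimal sketch that uses the `password` grant type; all credentials and names are illustrative placeholders:
+
+```ts
+// Create an API key on behalf of an end user whose credentials we hold.
+const granted = await client.security.grantApiKey({
+  grant_type: 'password',
+  username: 'end-user',
+  password: 'end-user-password',
+  api_key: {
+    name: 'granted-key',
+    expiration: '1d'
+  }
+})
+```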
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key) + +```ts +client.security.grantApiKey({ api_key, grant_type }) +``` + +### Arguments [_arguments_security.grant_api_key] + +#### Request (object) [_request_security.grant_api_key] +- **`api_key` ({ name, expiration, role_descriptors, metadata })**: The API key. +- **`grant_type` (Enum("access_token" \| "password"))**: The type of grant. Supported grant types are: `access_token`, `password`. +- **`access_token` (Optional, string)**: The user's access token. +If you specify the `access_token` grant type, this parameter is required. +It is not valid with other grant types. +- **`username` (Optional, string)**: The user name that identifies the user. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +- **`password` (Optional, string)**: The user's password. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +- **`run_as` (Optional, string)**: The name of the user to be impersonated. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. + +## client.security.hasPrivileges [_security.has_privileges] +Check user privileges. + +Determine whether the specified user has a specified list of privileges. +All users can use this API, but only to determine their own privileges. +To check the privileges of other users, you must use the run as feature. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges) + +```ts +client.security.hasPrivileges({ ... 
}) +``` + +### Arguments [_arguments_security.has_privileges] + +#### Request (object) [_request_security.has_privileges] +- **`user` (Optional, string)**: Username +- **`application` (Optional, { application, privileges, resources }[])** +- **`cluster` (Optional, Enum("all" \| "cancel_task" \| "create_snapshot" \| "cross_cluster_replication" \| "cross_cluster_search" \| "delegate_pki" \| "grant_api_key" \| "manage" \| "manage_api_key" \| "manage_autoscaling" \| "manage_behavioral_analytics" \| "manage_ccr" \| "manage_data_frame_transforms" \| "manage_data_stream_global_retention" \| "manage_enrich" \| "manage_esql" \| "manage_ilm" \| "manage_index_templates" \| "manage_inference" \| "manage_ingest_pipelines" \| "manage_logstash_pipelines" \| "manage_ml" \| "manage_oidc" \| "manage_own_api_key" \| "manage_pipeline" \| "manage_rollup" \| "manage_saml" \| "manage_search_application" \| "manage_search_query_rules" \| "manage_search_synonyms" \| "manage_security" \| "manage_service_account" \| "manage_slm" \| "manage_token" \| "manage_transform" \| "manage_user_profile" \| "manage_watcher" \| "monitor" \| "monitor_data_frame_transforms" \| "monitor_data_stream_global_retention" \| "monitor_enrich" \| "monitor_esql" \| "monitor_inference" \| "monitor_ml" \| "monitor_rollup" \| "monitor_snapshot" \| "monitor_stats" \| "monitor_text_structure" \| "monitor_transform" \| "monitor_watcher" \| "none" \| "post_behavioral_analytics_event" \| "read_ccr" \| "read_fleet_secrets" \| "read_ilm" \| "read_pipeline" \| "read_security" \| "read_slm" \| "transport_client" \| "write_connector_secrets" \| "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. +- **`index` (Optional, { names, privileges, allow_restricted_indices }[])** + +## client.security.hasPrivilegesUserProfile [_security.has_privileges_user_profile] +Check user profile privileges. + +Determine whether the users associated with the specified user profile IDs have all the requested privileges. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile) + +```ts +client.security.hasPrivilegesUserProfile({ uids, privileges }) +``` + +### Arguments [_arguments_security.has_privileges_user_profile] + +#### Request (object) [_request_security.has_privileges_user_profile] +- **`uids` (string[])**: A list of profile IDs. The privileges are checked for associated users of the profiles. +- **`privileges` ({ application, cluster, index })**: An object containing all the privileges to be checked. + +## client.security.invalidateApiKey [_security.invalidate_api_key] +Invalidate API keys. + +This API invalidates API keys created by the create API key or grant API key APIs. +Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. + +To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. +The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. 
+The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys.
+The `manage_own_api_key` privilege only allows deleting REST API keys that are owned by the user.
+In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats:
+
+- Set the parameter `owner=true`.
+- Or, set both `username` and `realm_name` to match the user's identity.
+- Or, if the request is issued by an API key, that is to say, an API key invalidates itself, specify its ID in the `ids` field.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key)
+
+```ts
+client.security.invalidateApiKey({ ... })
+```
+
+### Arguments [_arguments_security.invalidate_api_key]
+
+#### Request (object) [_request_security.invalidate_api_key]
+- **`id` (Optional, string)**
+- **`ids` (Optional, string[])**: A list of API key ids.
+This parameter cannot be used with any of `name`, `realm_name`, or `username`.
+- **`name` (Optional, string)**: An API key name.
+This parameter cannot be used with any of `ids`, `realm_name` or `username`.
+- **`owner` (Optional, boolean)**: Query API keys owned by the currently authenticated user.
+The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones.
+
+NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`.
+- **`realm_name` (Optional, string)**: The name of an authentication realm.
+This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`.
+- **`username` (Optional, string)**: The username of a user.
+This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`.
+
+## client.security.invalidateToken [_security.invalidate_token]
+Invalidate a token.
+
+The access tokens returned by the get token API have a finite period of time for which they are valid.
+After that time period, they can no longer be used.
+The time period is defined by the `xpack.security.authc.token.timeout` setting.
+
+The refresh tokens returned by the get token API are only valid for 24 hours.
+They can also be used exactly once.
+If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.
+
+NOTE: While all parameters are optional, at least one of them is required.
+More specifically, either the `token` or the `refresh_token` parameter is required.
+If neither of these two is specified, then `realm_name` and/or `username` need to be specified.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token)
+
+```ts
+client.security.invalidateToken({ ... })
+```
+
+### Arguments [_arguments_security.invalidate_token]
+
+#### Request (object) [_request_security.invalidate_token]
+- **`token` (Optional, string)**: An access token.
+This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used.
+- **`refresh_token` (Optional, string)**: A refresh token.
+This parameter cannot be used if any of `token`, `realm_name`, or `username` are used.
+- **`realm_name` (Optional, string)**: The name of an authentication realm.
+This parameter cannot be used with either `refresh_token` or `token`.
+- **`username` (Optional, string)**: The username of a user.
+This parameter cannot be used with either `refresh_token` or `token`.
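+
+For example, a minimal sketch that revokes a single access token; the credentials are illustrative placeholders:
+
+```ts
+const { access_token } = await client.security.getToken({
+  grant_type: 'password',
+  username: 'elastic',
+  password: 'changeme'
+})
+// Revoke the token immediately instead of waiting for it to time out.
+const result = await client.security.invalidateToken({ token: access_token })
+console.log(result.invalidated_tokens)
+```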
+
+## client.security.oidcAuthenticate [_security.oidc_authenticate]
+Authenticate OpenID Connect.
+
+Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.
+
+Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate)
+
+```ts
+client.security.oidcAuthenticate({ nonce, redirect_uri, state })
+```
+
+### Arguments [_arguments_security.oidc_authenticate]
+
+#### Request (object) [_request_security.oidc_authenticate]
+- **`nonce` (string)**: Associate a client session with an ID token and mitigate replay attacks.
+This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call.
+- **`redirect_uri` (string)**: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication.
+This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider.
+- **`state` (string)**: Maintain state between the authentication request and the response.
+This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call.
+- **`realm` (Optional, string)**: The name of the OpenID Connect realm.
+This property is useful in cases where multiple realms are defined.
+
+## client.security.oidcLogout [_security.oidc_logout]
+Logout of OpenID Connect.
+
+Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API.
+
+If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout.
+
+Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout)
+
+```ts
+client.security.oidcLogout({ token })
+```
+
+### Arguments [_arguments_security.oidc_logout]
+
+#### Request (object) [_request_security.oidc_logout]
+- **`token` (string)**: The access token to be invalidated.
+- **`refresh_token` (Optional, string)**: The refresh token to be invalidated.
+
+## client.security.oidcPrepareAuthentication [_security.oidc_prepare_authentication]
+Prepare OpenID Connect authentication.
+
+Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.
+
+The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process.
+
+Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication)
+
+```ts
+client.security.oidcPrepareAuthentication({ ... })
+```
+
+### Arguments [_arguments_security.oidc_prepare_authentication]
+
+#### Request (object) [_request_security.oidc_prepare_authentication]
+- **`iss` (Optional, string)**: In the case of a third-party initiated single sign-on, this is the issuer identifier for the OP that the RP is to send the authentication request to.
+It cannot be specified when *realm* is specified.
+One of *realm* or *iss* is required.
+- **`login_hint` (Optional, string)**: In the case of a third-party initiated single sign-on, it is a string value that is included in the authentication request as the *login_hint* parameter.
+This parameter is not valid when *realm* is specified.
+- **`nonce` (Optional, string)**: The value used to associate a client session with an ID token and to mitigate replay attacks.
+If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.
+- **`realm` (Optional, string)**: The name of the OpenID Connect realm in Elasticsearch whose configuration should be used in order to generate the authentication request.
+It cannot be specified when *iss* is specified.
+One of *realm* or *iss* is required.
+- **`state` (Optional, string)**: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation.
+If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response.
+
+## client.security.putPrivileges [_security.put_privileges]
+Create or update application privileges.
+
+To use this API, you must have one of the following privileges:
+
+* The `manage_security` cluster privilege (or a greater privilege such as `all`).
+* The "Manage Application Privileges" global privilege for the application being referenced in the request.
+
+Application names are formed from a prefix and an optional suffix, which must conform to the following rules:
+
+* The prefix must begin with a lowercase ASCII letter.
+* The prefix must contain only ASCII letters or digits.
+* The prefix must be at least 3 characters long.
+* If the suffix exists, it must begin with either a dash (`-`) or an underscore (`_`).
+* The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`.
+* No part of the name can contain whitespace.
+
+Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`.
+
+Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges)
+
+```ts
+client.security.putPrivileges({ ... })
+```
+
+### Arguments [_arguments_security.put_privileges]
+
+#### Request (object) [_request_security.put_privileges]
+- **`privileges` (Optional, Record)**
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.putRole [_security.put_role]
+Create or update roles.
+
+The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management.
+The create or update roles API cannot update roles that are defined in roles files.
+File-based role management is not available in Elastic Serverless.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role)
+
+```ts
+client.security.putRole({ name })
+```
+
+### Arguments [_arguments_security.put_role]
+
+#### Request (object) [_request_security.put_role]
+- **`name` (string)**: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role.
+- **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries.
+- **`cluster` (Optional, Enum("all" \| "cancel_task" \| "create_snapshot" \| "cross_cluster_replication" \| "cross_cluster_search" \| "delegate_pki" \| "grant_api_key" \| "manage" \| "manage_api_key" \| "manage_autoscaling" \| "manage_behavioral_analytics" \| "manage_ccr" \| "manage_data_frame_transforms" \| "manage_data_stream_global_retention" \| "manage_enrich" \| "manage_esql" \| "manage_ilm" \| "manage_index_templates" \| "manage_inference" \| "manage_ingest_pipelines" \| "manage_logstash_pipelines" \| "manage_ml" \| "manage_oidc" \| "manage_own_api_key" \| "manage_pipeline" \| "manage_rollup" \| "manage_saml" \| "manage_search_application" \| "manage_search_query_rules" \| "manage_search_synonyms" \| "manage_security" \| "manage_service_account" \| "manage_slm" \| "manage_token" \| "manage_transform" \| "manage_user_profile" \| "manage_watcher" \| "monitor" \| "monitor_data_frame_transforms" \| "monitor_data_stream_global_retention" \| "monitor_enrich" \| "monitor_esql" \| "monitor_inference" \| "monitor_ml" \| "monitor_rollup" \| "monitor_snapshot" \| "monitor_stats" \| "monitor_text_structure" \| "monitor_transform" \| "monitor_watcher" \| "none" \| "post_behavioral_analytics_event" \| "read_ccr" \| "read_fleet_secrets" \| "read_ilm" \| "read_pipeline" \| "read_security" \| "read_slm" \| "transport_client" \| "write_connector_secrets" \| "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role.
+- **`global` (Optional, Record)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges.
+- **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries.
+- **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries.
+
+NOTE: Remote indices are effective for remote clusters configured with the API key-based model.
+They have no effect for remote clusters configured with the certificate-based model.
+- **`remote_cluster` (Optional, { clusters, privileges }[])**: A list of remote cluster permissions entries.
+- **`metadata` (Optional, Record)**: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use.
+- **`run_as` (Optional, string[])**: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected.
+- **`description` (Optional, string)**: Optional description of the role descriptor.
+- **`transient_metadata` (Optional, Record)**: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API.
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.putRoleMapping [_security.put_role_mapping]
+Create or update role mappings.
+
+Role mappings define which roles are assigned to each user.
+Each mapping has rules that identify users and a list of roles that are granted to those users.
+The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.
+
+NOTE: This API does not create roles. Rather, it maps users to existing roles.
+Roles can be created by using the create or update roles API or roles files.
+
+**Role templates**
+
+The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name.
+For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch.
+The `roles` field is used for this purpose.
+
+For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user.
+The `role_templates` field is used for this purpose.
+
+NOTE: To use role templates successfully, the relevant scripting feature must be enabled.
+Otherwise, all attempts to create a role mapping with role templates fail.
+
+All of the user fields that are available in the role mapping rules are also available in the role templates.
+Thus, it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.
+
+By default, a template is evaluated to produce a single string that is the name of the role that should be assigned to the user.
+If the format of the template is set to "json", then the template is expected to produce a JSON string or an array of JSON strings for the role names.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping)
+
+```ts
+client.security.putRoleMapping({ name })
+```
+
+### Arguments [_arguments_security.put_role_mapping]
+
+#### Request (object) [_request_security.put_role_mapping]
+- **`name` (string)**: The distinct name that identifies the role mapping.
+The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way.
+- **`enabled` (Optional, boolean)**: Mappings that have `enabled` set to `false` are ignored when role mapping is performed.
+- **`metadata` (Optional, Record)**: Additional metadata that helps define which roles are assigned to each user.
+Within the metadata object, keys beginning with `_` are reserved for system usage.
+- **`roles` (Optional, string[])**: A list of role names that are granted to the users that match the role mapping rules.
+Exactly one of `roles` or `role_templates` must be specified.
+- **`role_templates` (Optional, { format, template }[])**: A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules.
+Exactly one of `roles` or `role_templates` must be specified.
+- **`rules` (Optional, { any, all, field, except })**: The rules that determine which users should be matched by the mapping.
+A rule is a logical condition that is expressed by using a JSON DSL.
+- **`run_as` (Optional, string[])**
+- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+## client.security.putUser [_security.put_user]
+Create or update users.
+
+Add and update users in the native realm.
+A password is required for adding a new user but is optional when updating an existing user.
+To change a user's password without updating any other fields, use the change password API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user)
+
+```ts
+client.security.putUser({ username })
+```
+
+### Arguments [_arguments_security.put_user]
+
+#### Request (object) [_request_security.put_user]
+- **`username` (string)**: An identifier for the user.
+
+NOTE: Usernames must be at least 1 character and no more than 507 characters.
+They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block.
+Leading or trailing whitespace is not allowed.
+- **`email` (Optional, string \| null)**: The email of the user.
+- **`full_name` (Optional, string \| null)**: The full name of the user.
+- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the user.
+- **`password` (Optional, string)**: The user's password.
+Passwords must be at least 6 characters long.
+When adding a user, one of `password` or `password_hash` is required.
+When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password.
+- **`password_hash` (Optional, string)**: A hash of the user's password.
+This must be produced using the same hashing algorithm as has been configured for password storage. +For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. +Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. +The `password` parameter and the `password_hash` parameter cannot be used in the same request. +- **`roles` (Optional, string[])**: A set of roles the user has. +The roles determine the user's access permissions. +To create a user without any roles, specify an empty list (`[]`). +- **`enabled` (Optional, boolean)**: Specifies whether the user is enabled. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: Valid values are `true`, `false`, and `wait_for`. +These values have the same meaning as in the index API, but the default value for this API is true. + +## client.security.queryApiKeys [_security.query_api_keys] +Find API keys with a query. + +Get a paginated list of API keys and their information. +You can optionally filter the results with a query. + +To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. +If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. +Refer to the linked documentation for examples of how to find API keys: + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys) + +```ts +client.security.queryApiKeys({ ... }) +``` + +### Arguments [_arguments_security.query_api_keys] + +#### Request (object) [_request_security.query_api_keys] +- **`aggregations` (Optional, Record)**: Any aggregations to run over the corpus of returned API keys. +Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. +This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, +`cardinality`, `value_count`, `composite`, `filter`, and `filters`. +Additionally, aggregations only run over the same subset of fields that query works with. +- **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which API keys to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following public information associated with an API key: `id`, `type`, `name`, +`creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. + +NOTE: The queryable string values associated with API keys are internally mapped as keywords. +Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. +Such a match query is hence equivalent to a `term` query. +- **`from` (Optional, number)**: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. 
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition. +Other than `id`, all public fields of an API key are eligible for sorting. +In addition, sort can also be applied to the `_doc` field to sort by index order. +- **`size` (Optional, number)**: The number of hits to return. +It must not be negative. +The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: The search after definition. +- **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user's role descriptors associated with the API key. +An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). +An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. +- **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile UID for the API key owner principal. +If it exists, the profile UID is returned under the `profile_uid` response field for each API key. +- **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. + +## client.security.queryRole [_security.query_role] +Find roles with a query. + +Get roles in a paginated manner. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. +You can optionally filter the results with a query. +Also, the results can be paginated and sorted. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role) + +```ts +client.security.queryRole({ ... }) +``` + +### Arguments [_arguments_security.query_role] + +#### Request (object) [_request_security.query_role] +- **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which roles to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following information associated with roles: `name`, `description`, `metadata`, +`applications.application`, `applications.privileges`, and `applications.resources`. +- **`from` (Optional, number)**: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition. +You can sort on `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, +and `applications.resources`. +In addition, sort can also be applied to the `_doc` field to sort by index order. 
+- **`size` (Optional, number)**: The number of hits to return. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: The search after definition. + +## client.security.queryUser [_security.query_user] +Find users with a query. + +Get information for users in a paginated manner. +You can optionally filter the results with a query. + +NOTE: As opposed to the get user API, built-in users are excluded from the result. +This API is only for native users. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user) + +```ts +client.security.queryUser({ ... }) +``` + +### Arguments [_arguments_security.query_user] + +#### Request (object) [_request_security.query_user] +- **`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which users to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following information associated with users: `username`, `roles`, `enabled`, `full_name`, and `email`. +- **`from` (Optional, number)**: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: The sort definition. +Fields eligible for sorting are: `username`, `roles`, `enabled`. +In addition, sort can also be applied to the `_doc` field to sort by index order. +- **`size` (Optional, number)**: The number of hits to return. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: The search after definition. +- **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users. + +## client.security.samlAuthenticate [_security.saml_authenticate] +Authenticate SAML. + +Submit a SAML response message to Elasticsearch for consumption. + +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. + +The SAML message that is submitted can be: + +* A response to a SAML authentication request that was previously created using the SAML prepare authentication API. +* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. + +In either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`. + +After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.
+This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate) + +```ts +client.security.samlAuthenticate({ content, ids }) +``` + +### Arguments [_arguments_security.saml_authenticate] + +#### Request (object) [_request_security.saml_authenticate] +- **`content` (string)**: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. +- **`ids` (string \| string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. +- **`realm` (Optional, string)**: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. + +## client.security.samlCompleteLogout [_security.saml_complete_logout] +Logout of SAML completely. + +Verifies the logout response sent from the SAML IdP. + +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. + +The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. +This API verifies the response by ensuring the content is relevant and validating its signature. +An empty response is returned if the verification process is successful. +The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. +The caller of this API must prepare the request accordingly so that this API can handle either of them. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout) + +```ts +client.security.samlCompleteLogout({ realm, ids }) +``` + +### Arguments [_arguments_security.saml_complete_logout] + +#### Request (object) [_request_security.saml_complete_logout] +- **`realm` (string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. +- **`ids` (string \| string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. +- **`query_string` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. +- **`content` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. + +## client.security.samlInvalidate [_security.saml_invalidate] +Invalidate SAML. + +Submit a SAML LogoutRequest message to Elasticsearch for consumption. + +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. + +The logout request comes from the SAML IdP during an IdP initiated Single Logout. +The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. +After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. +Thus the user can be redirected back to their IdP. 
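+
+As a sketch, a custom web application might forward the logout request like this (the query string and realm name here are hypothetical placeholders):
+
+```ts
+const response = await client.security.samlInvalidate({
+  // Pass the exact query string the browser was redirected with; do not parse or re-encode it
+  query_string: 'SAMLRequest=nZFda8IwFIb...&SigAlg=http%3A%2F%2F...&Signature=MsAYz...',
+  realm: 'saml1'
+})
+// response.redirect contains a URL with a SAML LogoutResponse to send the user back to the IdP
+```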
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate) + +```ts +client.security.samlInvalidate({ query_string }) +``` + +### Arguments [_arguments_security.saml_invalidate] + +#### Request (object) [_request_security.saml_invalidate] +- **`query_string` (string)**: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. +This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. +If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. +In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. +The client application must not attempt to parse or process the string in any way. +- **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. +- **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch for which the configuration is used. You must specify either this parameter or the `acs` parameter. + +## client.security.samlLogout [_security.saml_logout] +Logout of SAML. + +Submits a request to invalidate an access token and refresh token. + +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. + +This API invalidates the tokens that were generated for a user by the SAML authenticate API. +If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout) + +```ts +client.security.samlLogout({ token }) +``` + +### Arguments [_arguments_security.saml_logout] + +#### Request (object) [_request_security.saml_logout] +- **`token` (string)**: The access token that was returned as a response to calling the SAML authenticate API. +Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. +- **`refresh_token` (Optional, string)**: The refresh token that was returned as a response to calling the SAML authenticate API. +Alternatively, the most recent refresh token that was received after refreshing the original access token. + +## client.security.samlPrepareAuthentication [_security.saml_prepare_authentication] +Prepare SAML authentication. + +Create a SAML authentication request (`<AuthnRequest>`) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. + +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. + +This API returns a URL pointing to the SAML Identity Provider. +You can use the URL to redirect the browser of the user in order to continue the authentication process.
+The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. +If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. +These parameters contain the algorithm used for the signature and the signature value itself. +It also returns a random string that uniquely identifies this SAML Authentication request. +The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication) + +```ts +client.security.samlPrepareAuthentication({ ... }) +``` + +### Arguments [_arguments_security.saml_prepare_authentication] + +#### Request (object) [_request_security.saml_prepare_authentication] +- **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. +The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. +- **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. +You must specify either this parameter or the `acs` parameter. +- **`relay_state` (Optional, string)**: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. +If the Authentication Request is signed, this value is used as part of the signature computation. + +## client.security.samlServiceProviderMetadata [_security.saml_service_provider_metadata] +Create SAML service provider metadata. + +Generate SAML metadata for a SAML 2.0 Service Provider. + +The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. +This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata) + +```ts +client.security.samlServiceProviderMetadata({ realm_name }) +``` + +### Arguments [_arguments_security.saml_service_provider_metadata] + +#### Request (object) [_request_security.saml_service_provider_metadata] +- **`realm_name` (string)**: The name of the SAML realm in Elasticsearch. + +## client.security.suggestUserProfiles [_security.suggest_user_profiles] +Suggest a user profile. + +Get suggestions for user profiles that match specified search criteria. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles) + +```ts +client.security.suggestUserProfiles({ ... }) +``` + +### Arguments [_arguments_security.suggest_user_profiles] + +#### Request (object) [_request_security.suggest_user_profiles] +- **`name` (Optional, string)**: A query string used to match name-related fields in user profile documents. +Name-related fields are the user's `username`, `full_name`, and `email`. 
+- **`size` (Optional, number)**: The number of profiles to return. +- **`data` (Optional, string \| string[])**: A list of filters for the `data` field of the profile document. +To return all content, use `data=*`. +To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`. +By default, the API returns no `data` content. +It is an error to specify `data` as both the query parameter and the request body field. +- **`hint` (Optional, { uids, labels })**: Extra search criteria to improve relevance of the suggestion result. +Profiles matching the specified hint are ranked higher in the response. +Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. + +## client.security.updateApiKey [_security.update_api_key] +Update an API key. + +Update attributes of an existing API key. +This API supports updates to an API key's access scope, expiration, and metadata. + +To use this API, you must have at least the `manage_own_api_key` cluster privilege. +Users can only update API keys that they created or that were granted to them. +To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. + +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. + +Use this API to update API keys created by the create API key or grant API Key APIs. +If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. +It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. + +The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. + +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. +This change can occur if the owner user's permissions have changed since the API key was created or last modified. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key) + +```ts +client.security.updateApiKey({ id }) +``` + +### Arguments [_arguments_security.update_api_key] + +#### Request (object) [_request_security.update_api_key] +- **`id` (string)**: The ID of the API key to update. +- **`role_descriptors` (Optional, Record)**: The role descriptors to assign to this API key. +The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. +The structure of a role descriptor is the same as the request for the create API keys API. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. +It supports a nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage.
+When specified, this value fully replaces the metadata previously associated with the API key. +- **`expiration` (Optional, string \| -1 \| 0)**: The expiration time for the API key. +By default, API keys never expire. +This property can be omitted to leave the expiration unchanged. + +## client.security.updateCrossClusterApiKey [_security.update_cross_cluster_api_key] +Update a cross-cluster API key. + +Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. + +To use this API, you must have at least the `manage_security` cluster privilege. +Users can only update API keys that they created. +To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. + +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. +To update an API key, the owner user's credentials are required. + +It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. + +This API supports updates to an API key's access scope, metadata, and expiration. +The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. + +NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. + +To learn more about how to use this API, refer to the [Update cross-cluster API key API examples page](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples). + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key) + +```ts +client.security.updateCrossClusterApiKey({ id, access }) +``` + +### Arguments [_arguments_security.update_cross_cluster_api_key] + +#### Request (object) [_request_security.update_cross_cluster_api_key] +- **`id` (string)**: The ID of the cross-cluster API key to update. +- **`access` ({ replication, search })**: The access to be granted to this API key. +The access is composed of permissions for cross cluster search and cross cluster replication. +At least one of them must be specified. +When specified, the new access assignment fully replaces the previously assigned access. +- **`expiration` (Optional, string \| -1 \| 0)**: The expiration time for the API key. +By default, API keys never expire. This property can be omitted to leave the value unchanged. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. +It supports a nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +When specified, this information fully replaces metadata previously associated with the API key. +- **`certificate_identity` (Optional, string)**: The certificate identity to associate with this API key. +This field is used to restrict the API key to connections authenticated by a specific TLS certificate. +The value should match the certificate's distinguished name (DN) pattern. +When specified, this fully replaces any previously assigned certificate identity. +To clear an existing certificate identity, explicitly set this field to `null`. +When omitted, the existing certificate identity remains unchanged. + +## client.security.updateSettings [_security.update_settings] +Update security index settings. + +Update the user-configurable settings for the security internal index (`.security` and associated indices).
Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. + +NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. + +If a specific index is not in use on the system and settings are provided for it, the request will be rejected. +This API does not yet support configuring the settings for indices before they are in use. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings) + +```ts +client.security.updateSettings({ ... }) +``` + +### Arguments [_arguments_security.update_settings] + +#### Request (object) [_request_security.update_settings] +- **`security` (Optional, { index })**: Settings for the index used for most security configuration, including native realm users and roles configured with the API. +- **`security-profile` (Optional, { index })**: Settings for the index used to store profile information. +- **`security-tokens` (Optional, { index })**: Settings for the index used to store tokens. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.security.updateUserProfileData [_security.update_user_profile_data] +Update user profile data. + +Update specific data for the user profile that is associated with a unique ID. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +To use this API, you must have one of the following privileges: + +* The `manage_user_profile` cluster privilege. +* The `update_profile_data` global privilege for the namespaces that are referenced in the request. + +This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. +New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. + +For both labels and data, content is namespaced by the top-level fields. +The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data) + +```ts +client.security.updateUserProfileData({ uid }) +``` + +### Arguments [_arguments_security.update_user_profile_data] + +#### Request (object) [_request_security.update_user_profile_data] +- **`uid` (string)**: A unique identifier for the user profile. +- **`labels` (Optional, Record)**: Searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +- **`data` (Optional, Record)**: Non-searchable data that you want to associate with the user profile. +This field supports a nested data structure. 
+Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +The data object is not searchable, but can be retrieved with the get user profile API. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. +- **`refresh` (Optional, Enum(true \| false \| "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. + +## client.simulate.ingest [_simulate.ingest] +Simulate data ingestion. +Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. + +This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. + +The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. +If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, in the same way that a non-simulated ingest would. +No data is indexed into Elasticsearch. +Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. +The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. + +This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. +The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. + +By default, the pipeline definitions that are currently in the system are used. +However, you can supply substitute pipeline definitions in the body of the request. +These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest) + +```ts +client.simulate.ingest({ docs }) +``` + +### Arguments [_arguments_simulate.ingest] + +#### Request (object) [_request_simulate.ingest] +- **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. +- **`index` (Optional, string)**: The index to simulate ingesting into. +This value can be overridden by specifying an index on each document. +If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. +- **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects. +- **`index_template_substitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects.
+- **`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })** +- **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. +If you don’t specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +- **`pipeline` (Optional, string)**: The pipeline to use as the default pipeline. +This value can be used to override the default pipeline of the index. +- **`merge_type` (Optional, Enum("index" \| "template"))**: The mapping merge type if mapping overrides are being provided in `mapping_addition`. +The allowed values are `index` and `template`. +The `index` option merges mappings the way they would be merged into an existing index. +The `template` option merges mappings the way they would be merged into a template. + +## client.slm.deleteLifecycle [_slm.delete_lifecycle] +Delete a policy. +Delete a snapshot lifecycle policy definition. +This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle) + +```ts +client.slm.deleteLifecycle({ policy_id }) +``` + +### Arguments [_arguments_slm.delete_lifecycle] + +#### Request (object) [_request_slm.delete_lifecycle] +- **`policy_id` (string)**: The ID of the snapshot lifecycle policy to remove. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.slm.executeLifecycle [_slm.execute_lifecycle] +Run a policy. +Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. +The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle) + +```ts +client.slm.executeLifecycle({ policy_id }) +``` + +### Arguments [_arguments_slm.execute_lifecycle] + +#### Request (object) [_request_slm.execute_lifecycle] +- **`policy_id` (string)**: The ID of the snapshot lifecycle policy to be executed. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.slm.executeRetention [_slm.execute_retention] +Run a retention policy. +Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. +The retention policy is normally applied according to its schedule.
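+
+For example, a minimal sketch that triggers retention immediately:
+
+```ts
+// No request parameters are required; expired snapshots are removed per each policy's retention rules
+const response = await client.slm.executeRetention()
+console.log(response.acknowledged) // true once the retention run has been accepted
+```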
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention) + +```ts +client.slm.executeRetention({ ... }) +``` + +### Arguments [_arguments_slm.execute_retention] + +#### Request (object) [_request_slm.execute_retention] +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.slm.getLifecycle [_slm.get_lifecycle] +Get policy information. +Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle) + +```ts +client.slm.getLifecycle({ ... }) +``` + +### Arguments [_arguments_slm.get_lifecycle] + +#### Request (object) [_request_slm.get_lifecycle] +- **`policy_id` (Optional, string \| string[])**: List of snapshot lifecycle policies to retrieve +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.slm.getStats [_slm.get_stats] +Get snapshot lifecycle management statistics. +Get global and policy-level statistics about actions taken by snapshot lifecycle management. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats) + +```ts +client.slm.getStats({ ... }) +``` + +### Arguments [_arguments_slm.get_stats] + +#### Request (object) [_request_slm.get_stats] +- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.slm.getStatus [_slm.get_status] +Get the snapshot lifecycle management status. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status) + +```ts +client.slm.getStatus({ ... }) +``` + +### Arguments [_arguments_slm.get_status] + +#### Request (object) [_request_slm.get_status] +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +## client.slm.putLifecycle [_slm.put_lifecycle] +Create or update a policy. +Create or update a snapshot lifecycle policy. +If the policy already exists, this request increments the policy version. +Only the latest version of a policy is stored. 
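+
+As a sketch, a nightly policy might look like this (the policy ID, schedule, and repository name are hypothetical, and the repository must already be registered):
+
+```ts
+await client.slm.putLifecycle({
+  policy_id: 'nightly-snapshots',
+  schedule: '0 30 1 * * ?',        // take a snapshot every day at 01:30
+  name: '<nightly-snap-{now/d}>',  // date math; a UUID is appended automatically
+  repository: 'my_repository',
+  config: { indices: ['data-*'], include_global_state: false },
+  retention: { expire_after: '30d', min_count: 5, max_count: 50 }
+})
+```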
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle) + +```ts +client.slm.putLifecycle({ policy_id }) +``` + +### Arguments [_arguments_slm.put_lifecycle] + +#### Request (object) [_request_slm.put_lifecycle] +- **`policy_id` (string)**: The identifier for the snapshot lifecycle policy you want to create or update. +- **`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })**: Configuration for each snapshot created by the policy. +- **`name` (Optional, string)**: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. +- **`repository` (Optional, string)**: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. +- **`retention` (Optional, { expire_after, max_count, min_count })**: Retention rules used to retain and delete snapshots created by the policy. +- **`schedule` (Optional, string)**: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +## client.slm.start [_slm.start] +Start snapshot lifecycle management. +Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. +Manually starting SLM is necessary only if it has been stopped using the stop SLM API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start) + +```ts +client.slm.start({ ... }) +``` + +### Arguments [_arguments_slm.start] + +#### Request (object) [_request_slm.start] +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +## client.slm.stop [_slm.stop] +Stop snapshot lifecycle management. +Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. +Stopping SLM does not stop any snapshots that are in progress. +You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. + +The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. +Use the get snapshot lifecycle management status API to see if SLM is running. 
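+
+For example, one plausible stop-and-verify sequence:
+
+```ts
+await client.slm.stop()
+// Poll until the plugin reports STOPPED rather than STOPPING
+const { operation_mode } = await client.slm.getStatus()
+console.log(operation_mode) // 'RUNNING' | 'STOPPING' | 'STOPPED'
+```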
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop) + +```ts +client.slm.stop({ ... }) +``` + +### Arguments [_arguments_slm.stop] + +#### Request (object) [_request_slm.stop] +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +## client.snapshot.cleanupRepository [_snapshot.cleanup_repository] +Clean up the snapshot repository. +Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository) + +```ts +client.snapshot.cleanupRepository({ repository }) +``` + +### Arguments [_arguments_snapshot.cleanup_repository] + +#### Request (object) [_request_snapshot.cleanup_repository] +- **`repository` (string)**: The name of the snapshot repository to clean up. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. + +## client.snapshot.clone [_snapshot.clone] +Clone a snapshot. +Clone part or all of a snapshot into another snapshot in the same repository. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone) + +```ts +client.snapshot.clone({ repository, snapshot, target_snapshot, indices }) +``` + +### Arguments [_arguments_snapshot.clone] + +#### Request (object) [_request_snapshot.clone] +- **`repository` (string)**: The name of the snapshot repository that both the source and target snapshots belong to. +- **`snapshot` (string)**: The source snapshot name. +- **`target_snapshot` (string)**: The target snapshot name. +- **`indices` (string)**: A list of indices to include in the snapshot. +Multi-target syntax is supported. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +## client.snapshot.create [_snapshot.create] +Create a snapshot. +Take a snapshot of a cluster or of data streams and indices.
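+
+As a sketch (the repository name and index pattern are hypothetical; the repository must be registered first):
+
+```ts
+const response = await client.snapshot.create({
+  repository: 'my_repository',
+  snapshot: '<snapshot-{now/d}>',  // date math keeps the name unique per day
+  indices: 'data-*',
+  include_global_state: false,
+  wait_for_completion: true        // resolve only once the snapshot finishes
+})
+console.log(response.snapshot?.state) // e.g. 'SUCCESS'
+```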
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create) + +```ts +client.snapshot.create({ repository, snapshot }) +``` + +### Arguments [_arguments_snapshot.create] + +#### Request (object) [_request_snapshot.create] +- **`repository` (string)**: The name of the repository for the snapshot. +- **`snapshot` (string)**: The name of the snapshot. +It supports date math. +It must be unique in the repository. +- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Determines how wildcard patterns in the `indices` parameter match data streams and indices. +It supports a list of values such as `open,hidden`. +- **`feature_states` (Optional, string[])**: The feature states to include in the snapshot. +Each feature state includes one or more system indices containing related data. +You can view a list of eligible features using the get features API. + +If `include_global_state` is `true`, all current feature states are included by default. +If `include_global_state` is `false`, no feature states are included by default. + +Note that specifying an empty array will result in the default behavior. +To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). +- **`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. +If `false`, the request returns an error for any data stream or index that is missing or closed. +- **`include_global_state` (Optional, boolean)**: If `true`, the current cluster state is included in the snapshot. +The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. +It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). +- **`indices` (Optional, string \| string[])**: A list of data streams and indices to include in the snapshot. +It supports a multi-target syntax. +The default is an empty array (`[]`), which includes all regular data streams and regular indices. +To exclude all data streams and indices, use `-*`. + +You can't use this parameter to include or exclude system indices or system data streams from a snapshot. +Use `feature_states` instead. +- **`metadata` (Optional, Record)**: Arbitrary metadata to attach to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. +It can have any contents but it must be less than 1024 bytes. +This information is not automatically generated by Elasticsearch. +- **`partial` (Optional, boolean)**: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. +Only shards that were successfully included in the snapshot will be restored. +All missing shards will be recreated as empty. + +If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the snapshot is complete.
+If `false`, the request returns a response when the snapshot initializes. + +## client.snapshot.createRepository [_snapshot.create_repository] +Create or update a snapshot repository. +IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. +To register a snapshot repository, the cluster's global metadata must be writeable. +Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. + +Several options for this API can be specified using a query parameter or a request body parameter. +If both parameters are specified, only the query parameter is used. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository) + +```ts +client.snapshot.createRepository({ repository }) +``` + +### Arguments [_arguments_snapshot.create_repository] + +#### Request (object) [_request_snapshot.create_repository] +- **`repository` (string)**: The name of the snapshot repository to register or update. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. +- **`verify` (Optional, boolean)**: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. +If `false`, this verification is skipped. +You can also perform this verification with the verify snapshot repository API. + +## client.snapshot.delete [_snapshot.delete] +Delete snapshots. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete) + +```ts +client.snapshot.delete({ repository, snapshot }) +``` + +### Arguments [_arguments_snapshot.delete] + +#### Request (object) [_request_snapshot.delete] +- **`repository` (string)**: The name of the repository to delete a snapshot from. +- **`snapshot` (string \| string[])**: A list of snapshot names to delete. +It also accepts wildcards (`*`). +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the matching snapshots are all deleted. +If `false`, the request returns a response as soon as the deletes are scheduled. + +## client.snapshot.deleteRepository [_snapshot.delete_repository] +Delete snapshot repositories. +When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. +The snapshots themselves are left untouched and in place.
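+
+For example (the repository name is hypothetical):
+
+```ts
+// Unregisters the repository reference only; the snapshot data itself stays in the backing store
+await client.snapshot.deleteRepository({ repository: 'my_repository' })
+```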
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository) + +```ts +client.snapshot.deleteRepository({ repository }) +``` + +### Arguments [_arguments_snapshot.delete_repository] + +#### Request (object) [_request_snapshot.delete_repository] +- **`repository` (string \| string[])**: The name of the snapshot repositories to unregister. +Wildcard (`*`) patterns are supported. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. + +## client.snapshot.get [_snapshot.get] +Get snapshot information. + +NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. +It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. +Snapshots concurrently created may be seen during an iteration. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get) + +```ts +client.snapshot.get({ repository, snapshot }) +``` + +### Arguments [_arguments_snapshot.get] + +#### Request (object) [_request_snapshot.get] +- **`repository` (string)**: A list of snapshot repository names used to limit the request. +Wildcard (`*`) expressions are supported. +- **`snapshot` (string \| string[])**: A list of snapshot names to retrieve. +Wildcards (`*`) are supported. + +* To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. +* To get information about any snapshots that are currently running, use `_current`. +- **`after` (Optional, string)**: An offset identifier to start pagination from as returned by the `next` field in the response body. +- **`from_sort_value` (Optional, string)**: The value of the current sort column at which to start retrieval. +It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. +It can be a millisecond time value or a number when sorting by `index-` or shard count. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. +- **`index_details` (Optional, boolean)**: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. +The default is `false`, meaning that this information is omitted. +- **`index_names` (Optional, boolean)**: If `true`, the response includes the name of each index in each snapshot. +- **`include_repository` (Optional, boolean)**: If `true`, the response includes the repository name in each snapshot. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error. +- **`order` (Optional, Enum("asc" \| "desc"))**: The sort order. +Valid values are `asc` for ascending and `desc` for descending order. +The default behavior is ascending order. +- **`offset` (Optional, number)**: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. +- **`size` (Optional, number)**: The maximum number of snapshots to return. +The default is 0, which means to return all that match the request without limit. +- **`slm_policy_filter` (Optional, string)**: Filter snapshots by a list of snapshot lifecycle management (SLM) policy names that snapshots belong to. + +You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. +For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. +Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. +To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. +- **`sort` (Optional, Enum("start_time" \| "duration" \| "name" \| "index_count" \| "repository" \| "shard_count" \| "failed_shard_count"))**: The sort order for the result. +The default behavior is sorting by snapshot start time stamp. +- **`state` (Optional, Enum("IN_PROGRESS" \| "SUCCESS" \| "FAILED" \| "PARTIAL" \| "INCOMPATIBLE") \| Enum("IN_PROGRESS" \| "SUCCESS" \| "FAILED" \| "PARTIAL" \| "INCOMPATIBLE")[])**: Only return snapshots with a state found in the given list of snapshot states. +The default is all snapshot states. +- **`verbose` (Optional, boolean)**: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. + +NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. + +## client.snapshot.getRepository [_snapshot.get_repository] +Get snapshot repository information. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository) + +```ts +client.snapshot.getRepository({ ... }) +``` + +### Arguments [_arguments_snapshot.get_repository] + +#### Request (object) [_request_snapshot.get_repository] +- **`repository` (Optional, string \| string[])**: A list of snapshot repository names used to limit the request. +Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. + +To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. +- **`local` (Optional, boolean)**: If `true`, the request gets information from the local node only. +If `false`, the request gets information from the master node. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. 
+To indicate that the request should never timeout, set it to `-1`. + +## client.snapshot.repositoryAnalyze [_snapshot.repository_analyze] +Analyze a snapshot repository. + +Performs operations on a snapshot repository in order to check for incorrect behaviour. + +There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. +Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. +This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. + +The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. +Run your first analysis with the default parameter values to check for simple problems. +Some repositories may behave correctly when lightly loaded but incorrectly under production-like workloads. +If the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. +Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. +Some repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster. +Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. + +If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. +This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. +If so, this storage system is not suitable for use as a snapshot repository. +Repository analysis triggers conditions that occur only rarely when taking snapshots in a production system. +Snapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis failures. +However your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis. +You can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using. +For instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis. +This allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch. +Please do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol. 
+You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.
+
+If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took.
+You can use this information to determine the performance of your storage system.
+If any operation fails or returns an incorrect result, the API returns an error.
+If the API returns an error, it may not have removed all the data it wrote to the repository.
+The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs.
+You should verify that this location has been cleaned up correctly.
+If there is still leftover data at the specified location, you should manually remove it.
+
+If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled.
+Some clients are configured to close their connection if no response is received within a certain timeout.
+An analysis takes a long time to complete so you might need to relax any such client-side timeouts.
+On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all.
+The path to the leftover data is recorded in the Elasticsearch logs.
+You should verify that this location has been cleaned up correctly.
+If there is still leftover data at the specified location, you should manually remove it.
+
+If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed.
+The analysis attempts to detect common bugs but it does not offer 100% coverage.
+Additionally, it does not test the following:
+
+* Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster.
+* Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted.
+* Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results.
+
+IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again.
+This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself.
+You must ensure this load does not affect other users of these systems.
+Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume.
+
+NOTE: This API is intended for exploratory use by humans.
+You should expect the request parameters and the response format to vary in future versions.
+The response exposes implementation details of the analysis which may change from version to version.
+
+NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones.
+A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version.
+This indicates it behaves incorrectly in ways that the former version did not detect.
+You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch.
+
+NOTE: This API may not work correctly in a mixed-version cluster.
+
+*Implementation details*
+
+NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions.
+The request parameters and response format depend on details of the implementation so may also be different in newer versions.
+
+The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter, and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter.
+These tasks are distributed over the data and master-eligible nodes in the cluster for execution.
+
+For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote.
+The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters.
+If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires.
+
+For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes.
+These reads are permitted to fail, but must not return partial data.
+If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.
+
+For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it.
+In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs.
+If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.
+
+The executing node will use a variety of different methods to write the blob.
+For instance, where applicable, it will use both single-part and multi-part uploads.
+Similarly, the reading nodes will use a variety of different methods to read the data back again.
+For instance they may read the entire blob from start to end or may read only a subset of the data.
+
+For some blob-level tasks, the executing node will cancel the write before it is complete.
+In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.
+
+Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation.
+This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time.
+The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type.
+Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed.
+Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results.
+If an operation fails due to contention, Elasticsearch retries the operation until it succeeds.
+Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. +Some operations also verify the behavior on small blobs with sizes other than 8 bytes. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze) + +```ts +client.snapshot.repositoryAnalyze({ repository }) +``` + +### Arguments [_arguments_snapshot.repository_analyze] + +#### Request (object) [_request_snapshot.repository_analyze] +- **`repository` (string)**: The name of the repository. +- **`blob_count` (Optional, number)**: The total number of blobs to write to the repository during the test. +For realistic experiments, set this parameter to at least `2000`. +- **`concurrency` (Optional, number)**: The number of operations to run concurrently during the test. +For realistic experiments, leave this parameter unset. +- **`detailed` (Optional, boolean)**: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. +If false, it returns only a summary of the analysis. +- **`early_read_node_count` (Optional, number)**: The number of nodes on which to perform an early read operation while writing each blob. +Early read operations are only rarely performed. +For realistic experiments, leave this parameter unset. +- **`max_blob_size` (Optional, number \| string)**: The maximum size of a blob to be written during the test. +For realistic experiments, set this parameter to at least `2gb`. +- **`max_total_data_size` (Optional, number \| string)**: An upper limit on the total size of all the blobs written during the test. +For realistic experiments, set this parameter to at least `1tb`. +- **`rare_action_probability` (Optional, number)**: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. +For realistic experiments, leave this parameter unset. +- **`rarely_abort_writes` (Optional, boolean)**: Indicates whether to rarely cancel writes before they complete. +For realistic experiments, leave this parameter unset. +- **`read_node_count` (Optional, number)**: The number of nodes on which to read a blob after writing. +For realistic experiments, leave this parameter unset. +- **`register_operation_count` (Optional, number)**: The minimum number of linearizable register operations to perform in total. +For realistic experiments, set this parameter to at least `100`. +- **`seed` (Optional, number)**: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. +To repeat the same set of operations in multiple experiments, use the same seed in each experiment. +Note that the operations are performed concurrently so might not always happen in the same order on each run. +For realistic experiments, leave this parameter unset. +- **`timeout` (Optional, string \| -1 \| 0)**: The period of time to wait for the test to complete. +If no response is received before the timeout expires, the test is cancelled and returns an error. +For realistic experiments, set this parameter sufficiently long to allow the test to complete. + +## client.snapshot.repositoryVerifyIntegrity [_snapshot.repository_verify_integrity] +Verify the repository integrity. +Verify the integrity of the contents of a snapshot repository. 
+
+This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail.
+
+If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity.
+Until you do so:
+
+* It may not be possible to restore some snapshots from this repository.
+* Searchable snapshots may report errors when searched or may have unassigned shards.
+* Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored.
+* Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk.
+* Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents.
+
+If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage.
+The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred.
+You must also identify what caused the damage and take action to prevent it from happening again.
+
+If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations.
+In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository.
+
+Avoid all operations which write to the repository while the verify repository integrity API is running.
+If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes.
+It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting.
+
+NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.
+
+NOTE: This API may not work correctly in a mixed-version cluster.
+
+The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster.
+For instance, by default it will only use at most half of the `snapshot_meta` threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool.
+If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster.
+For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API.
+
+The response exposes implementation details of the analysis which may change from version to version.
+The response body format is therefore not considered stable and may be different in newer versions.
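+As a minimal sketch of the verification step described above (the repository name `my_repository` is illustrative):
+
+```ts
+// Verify a repository that has already been made read-only.
+// The default concurrency settings limit the impact on other snapshot work.
+const report = await client.snapshot.repositoryVerifyIntegrity({
+  repository: 'my_repository'
+})
+```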
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity)
+
+```ts
+client.snapshot.repositoryVerifyIntegrity({ repository })
+```
+
+### Arguments [_arguments_snapshot.repository_verify_integrity]
+
+#### Request (object) [_request_snapshot.repository_verify_integrity]
+- **`repository` (string \| string[])**: The name of the snapshot repository.
+- **`blob_thread_pool_concurrency` (Optional, number)**: If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once.
+- **`index_snapshot_verification_concurrency` (Optional, number)**: The maximum number of index snapshots to verify concurrently within each index verification.
+- **`index_verification_concurrency` (Optional, number)**: The number of indices to verify concurrently.
+The default behavior is to use the entire `snapshot_meta` thread pool.
+- **`max_bytes_per_sec` (Optional, string)**: If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second.
+- **`max_failed_shard_snapshots` (Optional, number)**: The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage.
+If your repository contains more than this number of shard snapshot failures, the verification will fail.
+- **`meta_thread_pool_concurrency` (Optional, number)**: The maximum number of snapshot metadata operations to run concurrently.
+The default behavior is to use at most half of the `snapshot_meta` thread pool at once.
+- **`snapshot_verification_concurrency` (Optional, number)**: The number of snapshots to verify concurrently.
+The default behavior is to use at most half of the `snapshot_meta` thread pool at once.
+- **`verify_blob_contents` (Optional, boolean)**: Indicates whether to verify the checksum of every data blob in the repository.
+If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive.
+
+## client.snapshot.restore [_snapshot.restore]
+Restore a snapshot.
+Restore a snapshot of a cluster or data streams and indices.
+
+You can restore a snapshot only to a running cluster with an elected master node.
+The snapshot repository must be registered and available to the cluster.
+The snapshot and cluster versions must be compatible.
+
+To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.
+
+Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:
+
+```
+GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+```
+
+If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.
+
+If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.
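+For example, restoring selected indices under new names, so they do not collide with existing indices, might look like this sketch (the repository, snapshot, and rename values are illustrative):
+
+```ts
+// Restore two indices from a snapshot, prefixing the restored copies.
+const response = await client.snapshot.restore({
+  repository: 'my_repository',
+  snapshot: 'snapshot_1',
+  indices: 'index_1,index_2',
+  rename_pattern: 'index_(.+)',
+  rename_replacement: 'restored_index_$1',
+  include_global_state: false,
+  wait_for_completion: true
+})
+```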
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore) + +```ts +client.snapshot.restore({ repository, snapshot }) +``` + +### Arguments [_arguments_snapshot.restore] + +#### Request (object) [_request_snapshot.restore] +- **`repository` (string)**: The name of the repository to restore a snapshot from. +- **`snapshot` (string)**: The name of the snapshot to restore. +- **`feature_states` (Optional, string[])**: The feature states to restore. +If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. +If `include_global_state` is `false`, the request restores no feature states by default. +Note that specifying an empty array will result in the default behavior. +To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). +- **`ignore_index_settings` (Optional, string[])**: The index settings to not restore from the snapshot. +You can't use this option to ignore `index.number_of_shards`. + +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. +If `false`, the request returns an error for any missing index or data stream. +- **`include_aliases` (Optional, boolean)**: If `true`, the request restores aliases for any restored data streams and indices. +If `false`, the request doesn’t restore aliases. +- **`include_global_state` (Optional, boolean)**: If `true`, restore the cluster state. The cluster state includes: + +* Persistent cluster settings +* Index templates +* Legacy index templates +* Ingest pipelines +* Index lifecycle management (ILM) policies +* Stored scripts +* For snapshots taken after 7.12.0, feature states + +If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. +It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. + +Use the `feature_states` parameter to configure how feature states are restored. + +If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. 
+- **`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Index settings to add or change in restored indices, including backing indices. +You can't use this option to change `index.number_of_shards`. + +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +- **`indices` (Optional, string \| string[])**: A list of indices and data streams to restore. +It supports a multi-target syntax. +The default behavior is all regular indices and regular data streams in the snapshot. + +You can't use this parameter to restore system indices or system data streams. +Use `feature_states` instead. +- **`partial` (Optional, boolean)**: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. + +If true, it allows restoring a partial snapshot of indices with unavailable shards. +Only shards that were successfully included in the snapshot will be restored. +All missing shards will be recreated as empty. +- **`rename_pattern` (Optional, string)**: A rename pattern to apply to restored data streams and indices. +Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. + +The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. +- **`rename_replacement` (Optional, string)**: The rename replacement string that is used with the `rename_pattern`. +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the restore operation completes. +The operation is complete when it finishes all attempts to recover primary shards for restored indices. +This applies even if one or more of the recovery attempts fail. + +If `false`, the request returns a response when the restore operation initializes. + +## client.snapshot.status [_snapshot.status] +Get the snapshot status. +Get a detailed description of the current state for each shard participating in the snapshot. + +Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. +If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. 
+
+If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots.
+This usage is preferred.
+If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running.
+
+Note that the stats will not be available for any shard snapshots in an ongoing snapshot completed by a node that (even momentarily) left the cluster.
+Loading the stats from the repository is an expensive operation (see the WARNING below).
+Therefore the stats values for such shards will be -1 even though the "stage" value will be "DONE", in order to minimize latency.
+A "description" field will be present for a shard snapshot completed by a departed node explaining why the shard snapshot's stats results are invalid.
+Consequently, the total stats for the index will be less than expected due to the missing values from these shards.
+
+WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
+The API requires a read from the repository for each shard in each snapshot.
+For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
+
+Depending on the latency of your storage, such requests can take an extremely long time to return results.
+These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status)
+
+```ts
+client.snapshot.status({ ... })
+```
+
+### Arguments [_arguments_snapshot.status]
+
+#### Request (object) [_request_snapshot.status]
+- **`repository` (Optional, string)**: The snapshot repository name used to limit the request.
+It supports wildcards (`*`) if `<snapshot>` isn't specified.
+- **`snapshot` (Optional, string \| string[])**: A list of snapshots to retrieve status for.
+The default is currently running snapshots.
+Wildcards (`*`) are not supported.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable.
+If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+
+## client.snapshot.verifyRepository [_snapshot.verify_repository]
+Verify a snapshot repository.
+Check for common misconfigurations in a snapshot repository.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository)
+
+```ts
+client.snapshot.verifyRepository({ repository })
+```
+
+### Arguments [_arguments_snapshot.verify_repository]
+
+#### Request (object) [_request_snapshot.verify_repository]
+- **`repository` (string)**: The name of the snapshot repository to verify.
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. + +## client.sql.clearCursor [_sql.clear_cursor] +Clear an SQL search cursor. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor) + +```ts +client.sql.clearCursor({ cursor }) +``` + +### Arguments [_arguments_sql.clear_cursor] + +#### Request (object) [_request_sql.clear_cursor] +- **`cursor` (string)**: Cursor to clear. + +## client.sql.deleteAsync [_sql.delete_async] +Delete an async SQL search. +Delete an async SQL search or a stored synchronous SQL search. +If the search is still running, the API cancels it. + +If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: + +* Users with the `cancel_task` cluster privilege. +* The user who first submitted the search. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async) + +```ts +client.sql.deleteAsync({ id }) +``` + +### Arguments [_arguments_sql.delete_async] + +#### Request (object) [_request_sql.delete_async] +- **`id` (string)**: The identifier for the search. + +## client.sql.getAsync [_sql.get_async] +Get async SQL search results. +Get the current status and available results for an async SQL search or stored synchronous SQL search. + +If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async) + +```ts +client.sql.getAsync({ id }) +``` + +### Arguments [_arguments_sql.get_async] + +#### Request (object) [_request_sql.get_async] +- **`id` (string)**: The identifier for the search. +- **`delimiter` (Optional, string)**: The separator for CSV results. +The API supports this parameter only for CSV responses. +- **`format` (Optional, string)**: The format for the response. +You must specify a format using this parameter or the `Accept` HTTP header. +If you specify both, the API uses this parameter. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The retention period for the search and its results. +It defaults to the `keep_alive` period for the original SQL search. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for complete results. +It defaults to no timeout, meaning the request waits for complete search results. + +## client.sql.getAsyncStatus [_sql.get_async_status] +Get the async SQL search status. +Get the current status of an async SQL search or a stored synchronous SQL search. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status) + +```ts +client.sql.getAsyncStatus({ id }) +``` + +### Arguments [_arguments_sql.get_async_status] + +#### Request (object) [_request_sql.get_async_status] +- **`id` (string)**: The identifier for the search. + +## client.sql.query [_sql.query] +Get SQL search results. +Run an SQL request. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query) + +```ts +client.sql.query({ ... 
}) +``` + +### Arguments [_arguments_sql.query] + +#### Request (object) [_request_sql.query] +- **`allow_partial_search_results` (Optional, boolean)**: If `true`, the response has partial results when there are shard request timeouts or shard failures. +If `false`, the API returns an error with no partial results. +- **`catalog` (Optional, string)**: The default catalog (cluster) for queries. +If unspecified, the queries execute on the data in the local cluster only. +- **`columnar` (Optional, boolean)**: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. +The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. +- **`cursor` (Optional, string)**: The cursor used to retrieve a set of paginated results. +If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. +It ignores other request body parameters. +- **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response. +- **`field_multi_value_leniency` (Optional, boolean)**: If `false`, the API returns an exception when encountering multiple values for a field. +If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. +- **`index_using_frozen` (Optional, boolean)**: If `true`, the search can run on frozen indices. +- **`keep_alive` (Optional, string \| -1 \| 0)**: The retention period for an async or saved synchronous search. +- **`keep_on_completion` (Optional, boolean)**: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. +If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. +- **`page_timeout` (Optional, string \| -1 \| 0)**: The minimum retention period for the scroll cursor. +After this time period, a pagination request might fail because the scroll cursor is no longer available. +Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. +- **`params` (Optional, User-defined value[])**: The values for parameters in the query. +- **`query` (Optional, string)**: The SQL query to run. +- **`request_timeout` (Optional, string \| -1 \| 0)**: The timeout before the request fails. +- **`runtime_mappings` (Optional, Record)**: One or more runtime fields for the search request. +These fields take precedence over mapped fields with the same name. +- **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search. +- **`wait_for_completion_timeout` (Optional, string \| -1 \| 0)**: The period to wait for complete results. 
+It defaults to no timeout, meaning the request waits for complete search results.
+If the search doesn't finish within this period, the search becomes async.
+
+To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter.
+- **`format` (Optional, Enum("csv" \| "json" \| "tsv" \| "txt" \| "yaml" \| "cbor" \| "smile"))**: The format for the response.
+You can also specify a format using the `Accept` HTTP header.
+If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence.
+- **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the search using project
+metadata tags in a subset of Lucene query syntax.
+Allowed Lucene queries: the `_alias` tag and a single value (possibly wildcarded).
+Examples:
+  `_alias:my-project`
+  `_alias:_origin`
+  `_alias:*pr*`
+Supported in serverless only.
+
+## client.sql.translate [_sql.translate]
+Translate SQL into Elasticsearch queries.
+Translate an SQL search into a search API request containing Query DSL.
+It accepts the same request body parameters as the SQL search API, excluding `cursor`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate)
+
+```ts
+client.sql.translate({ query })
+```
+
+### Arguments [_arguments_sql.translate]
+
+#### Request (object) [_request_sql.translate]
+- **`query` (string)**: The SQL query to run.
+- **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response.
+- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering.
+- **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search.
+
+## client.ssl.certificates [_ssl.certificates]
+Get SSL certificates.
+
+Get information about the X.509 certificates that are used to encrypt communications in the cluster.
+The API returns a list that includes certificates from all TLS contexts including:
+
+- Settings for transport and HTTP interfaces
+- TLS settings that are used within authentication realms
+- TLS settings for remote monitoring exporters
+
+The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings.
+It also includes certificates that are used for configuring server identity, such as the `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings.
+
+The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.
+
+NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.
+
+If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates)
+
+```ts
+client.ssl.certificates()
+```
+
+## client.synonyms.deleteSynonym [_synonyms.delete_synonym]
+Delete a synonym set.
+
+You can only delete a synonyms set that is not in use by any index analyzer.
+
+Synonyms sets can be used in synonym graph token filters and synonym token filters.
+These synonym filters can be used as part of search analyzers.
+
+Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open).
+Even if the analyzer is not used on any field mapping, it still needs to be loaded during the index recovery phase.
+
+If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available.
+To prevent that, synonyms sets that are used in analyzers can't be deleted.
+A delete request in this case will return a 400 response code.
+
+To remove a synonyms set, you must first remove all indices that contain analyzers using it.
+You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data.
+Once finished, you can delete the index.
+When the synonyms set is not used in analyzers, you will be able to delete it.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym)
+
+```ts
+client.synonyms.deleteSynonym({ id })
+```
+
+### Arguments [_arguments_synonyms.delete_synonym]
+
+#### Request (object) [_request_synonyms.delete_synonym]
+- **`id` (string)**: The synonyms set identifier to delete.
+
+## client.synonyms.deleteSynonymRule [_synonyms.delete_synonym_rule]
+Delete a synonym rule.
+Delete a synonym rule from a synonym set.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule)
+
+```ts
+client.synonyms.deleteSynonymRule({ set_id, rule_id })
+```
+
+### Arguments [_arguments_synonyms.delete_synonym_rule]
+
+#### Request (object) [_request_synonyms.delete_synonym_rule]
+- **`set_id` (string)**: The ID of the synonym set to update.
+- **`rule_id` (string)**: The ID of the synonym rule to delete.
+- **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning.
+If `false`, analyzers will not be reloaded with the deleted synonym rule.
+
+## client.synonyms.getSynonym [_synonyms.get_synonym]
+Get a synonym set.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym)
+
+```ts
+client.synonyms.getSynonym({ id })
+```
+
+### Arguments [_arguments_synonyms.get_synonym]
+
+#### Request (object) [_request_synonyms.get_synonym]
+- **`id` (string)**: The synonyms set identifier to retrieve.
+- **`from` (Optional, number)**: The starting offset for synonym rules to retrieve.
+- **`size` (Optional, number)**: The maximum number of synonym rules to retrieve.
+
+## client.synonyms.getSynonymRule [_synonyms.get_synonym_rule]
+Get a synonym rule.
+Get a synonym rule from a synonym set.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule)
+
+```ts
+client.synonyms.getSynonymRule({ set_id, rule_id })
+```
+
+### Arguments [_arguments_synonyms.get_synonym_rule]
+
+#### Request (object) [_request_synonyms.get_synonym_rule]
+- **`set_id` (string)**: The ID of the synonym set to retrieve the synonym rule from.
+- **`rule_id` (string)**: The ID of the synonym rule to retrieve.
+
+## client.synonyms.getSynonymsSets [_synonyms.get_synonyms_sets]
+Get all synonym sets.
+Get a summary of all defined synonym sets.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym)
+
+```ts
+client.synonyms.getSynonymsSets({ ... })
+```
+
+### Arguments [_arguments_synonyms.get_synonyms_sets]
+
+#### Request (object) [_request_synonyms.get_synonyms_sets]
+- **`from` (Optional, number)**: The starting offset for synonyms sets to retrieve.
+- **`size` (Optional, number)**: The maximum number of synonyms sets to retrieve.
+
+## client.synonyms.putSynonym [_synonyms.put_synonym]
+Create or update a synonym set.
+Synonyms sets are limited to a maximum of 10,000 synonym rules per set.
+If you need to manage more synonym rules, you can create multiple synonym sets.
+
+When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices.
+This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.
+
+For practical examples of how to create or update a synonyms set, refer to the External documentation.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym)
+
+```ts
+client.synonyms.putSynonym({ id, synonyms_set })
+```
+
+### Arguments [_arguments_synonyms.put_synonym]
+
+#### Request (object) [_request_synonyms.put_synonym]
+- **`id` (string)**: The ID of the synonyms set to be created or updated.
+- **`synonyms_set` ({ id, synonyms } \| { id, synonyms }[])**: The synonym rules definitions for the synonyms set.
+- **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning.
+If `false`, analyzers will not be reloaded with the new synonym set.
+
+## client.synonyms.putSynonymRule [_synonyms.put_synonym_rule]
+Create or update a synonym rule.
+Create or update a synonym rule in a synonym set.
+
+If any of the synonym rules included is invalid, the API returns an error.
+
+When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule)
+
+```ts
+client.synonyms.putSynonymRule({ set_id, rule_id, synonyms })
+```
+
+### Arguments [_arguments_synonyms.put_synonym_rule]
+
+#### Request (object) [_request_synonyms.put_synonym_rule]
+- **`set_id` (string)**: The ID of the synonym set.
+- **`rule_id` (string)**: The ID of the synonym rule to be updated or created.
+- **`synonyms` (string)**: The synonym rule definition, which must be in Solr format.
+- **`refresh` (Optional, boolean)**: If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning.
+If `false`, analyzers will not be reloaded with the new synonym rule.
+
+## client.tasks.cancel [_tasks.cancel]
+Cancel a task.
+
+WARNING: The task management API is new and should still be considered a beta feature.
+The API may change in ways that are not backwards compatible.
+
+A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away.
+It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation.
+The get task information API will continue to list these cancelled tasks until they complete.
+The `cancelled` flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.
+
+To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running.
+You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+
+```ts
+client.tasks.cancel({ ... })
+```
+
+### Arguments [_arguments_tasks.cancel]
+
+#### Request (object) [_request_tasks.cancel]
+- **`task_id` (Optional, string)**: The task identifier.
+- **`actions` (Optional, string \| string[])**: A list or wildcard expression of actions that is used to limit the request.
+- **`nodes` (Optional, string[])**: A list of node IDs or names that is used to limit the request.
+- **`parent_task_id` (Optional, string)**: A parent task ID that is used to limit the tasks.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until all found tasks are complete.
+
+## client.tasks.get [_tasks.get]
+Get task information.
+Get information about a task currently running in the cluster.
+
+WARNING: The task management API is new and should still be considered a beta feature.
+The API may change in ways that are not backwards compatible.
+
+If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+
+```ts
+client.tasks.get({ task_id })
+```
+
+### Arguments [_arguments_tasks.get]
+
+#### Request (object) [_request_tasks.get]
+- **`task_id` (string)**: The task identifier.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed.
+
+## client.tasks.list [_tasks.list]
+Get all tasks.
+Get information about the tasks currently running on one or more nodes in the cluster.
+
+WARNING: The task management API is new and should still be considered a beta feature.
+The API may change in ways that are not backwards compatible.
+
+**Identifying running tasks**
+
+The `X-Opaque-Id` header, when provided on the HTTP request, is returned as a header in the response as well as in the `headers` field in the task information.
+This enables you to track certain calls or associate certain tasks with the client that started them.
+For example:
+
+```
+curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
+```
+
+The API returns the following result:
+
+```
+HTTP/1.1 200 OK
+X-Opaque-Id: 123456
+content-type: application/json; charset=UTF-8
+content-length: 831
+
+{
+  "tasks" : {
+    "u5lcZHqcQhu-rUoFaqDphA:45" : {
+      "node" : "u5lcZHqcQhu-rUoFaqDphA",
+      "id" : 45,
+      "type" : "transport",
+      "action" : "cluster:monitor/tasks/lists",
+      "start_time_in_millis" : 1513823752749,
+      "running_time_in_nanos" : 293139,
+      "cancellable" : false,
+      "headers" : {
+        "X-Opaque-Id" : "123456"
+      },
+      "children" : [
+        {
+          "node" : "u5lcZHqcQhu-rUoFaqDphA",
+          "id" : 46,
+          "type" : "direct",
+          "action" : "cluster:monitor/tasks/lists[n]",
+          "start_time_in_millis" : 1513823752750,
+          "running_time_in_nanos" : 92133,
+          "cancellable" : false,
+          "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45",
+          "headers" : {
+            "X-Opaque-Id" : "123456"
+          }
+        }
+      ]
+    }
+  }
+}
+```
+In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header.
+The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request.
+The `X-Opaque-Id` in the children `headers` is the ID for the child task of the task that was initiated by the REST request.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+
+```ts
+client.tasks.list({ ... })
+```
+
+### Arguments [_arguments_tasks.list]
+
+#### Request (object) [_request_tasks.list]
+- **`actions` (Optional, string \| string[])**: A list or wildcard expression of actions used to limit the request.
+For example, you can use `cluster:*` to retrieve all cluster-related tasks.
+- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks.
+This information is useful to distinguish tasks from each other but is more costly to run.
+- **`group_by` (Optional, Enum("nodes" \| "parents" \| "none"))**: A key that is used to group tasks in the response.
+The task lists can be grouped either by nodes or by parent tasks.
+- **`nodes` (Optional, string \| string[])**: A list of node IDs or names that is used to limit the returned information.
+- **`parent_task_id` (Optional, string)**: A parent task identifier that is used to limit returned information.
+To return all tasks, omit this parameter or use a value of `-1`.
+If the parent task is not found, the API does not return a 404 response code.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for each node to respond.
+If a node does not respond before its timeout expires, the response does not include its information.
+However, timed out nodes are included in the `node_failures` property.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.
+
+## client.textStructure.findFieldStructure [_text_structure.find_field_structure]
+Find the structure of a text field.
+Find the structure of a text field in an Elasticsearch index.
+
+This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch.
+For example, if you have ingested data into a very simple index that has just `@timestamp` and `message` fields, you can use this API to see what common structure exists in the `message` field.
+
+The response from the API contains:
+
+* Sample messages.
+* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
+* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
+* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
+
+All this information can be calculated by the structure finder with no guidance.
+However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
+
+If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.
+It helps determine why the returned structure was chosen.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure)
+
+```ts
+client.textStructure.findFieldStructure({ field, index })
+```
+
+### Arguments [_arguments_text_structure.find_field_structure]
+
+#### Request (object) [_request_text_structure.find_field_structure]
+- **`field` (string)**: The field that should be analyzed.
+- **`index` (string)**: The name of the index that contains the analyzed field.
+- **`column_names` (Optional, string \| string[])**: If `format` is set to `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+- **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+- **`documents_to_sample` (Optional, number)**: The number of documents to include in the structural analysis.
+The minimum value is 2.
+- **`ecs_compatibility` (Optional, Enum("disabled" \| "v1"))**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output.
+The intention in that situation is that a user who knows the meanings will rename the fields before using them.
+- **`explain` (Optional, boolean)**: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+- **`format` (Optional, Enum("delimited" \| "ndjson" \| "semi_structured_text" \| "xml"))**: The high level structure of the text.
+By default, the API chooses the format.
+In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+- **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
+If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
+If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+- **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
+Only a single character is supported.
+If this parameter is not specified, the default value is a double quote (`"`).
+If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+- **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
+If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`.
+Otherwise, the default value is `false`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum amount of time that the structure analysis can take.
+If the analysis is still running when the timeout expires, it will be stopped.
+- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
+- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.
+Only a subset of Java time format letter groups are supported:
+
+* `a`
+* `d`
+* `dd`
+* `EEE`
+* `EEEE`
+* `H`
+* `HH`
+* `h`
+* `M`
+* `MM`
+* `MMM`
+* `MMMM`
+* `mm`
+* `ss`
+* `XX`
+* `XXX`
+* `yy`
+* `yyyy`
+* `zzz`
+
+Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided that they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
+Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
+
+If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
+
+## client.textStructure.findMessageStructure [_text_structure.find_message_structure]
+Find the structure of text messages.
+Find the structure of a list of text messages.
+The messages must contain data that is suitable to be ingested into Elasticsearch.
+
+This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
+Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.
+
+The response from the API contains:
+
+* Sample messages.
+* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
+* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
+* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
+
+All this information can be calculated by the structure finder with no guidance.
+However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
+
+If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response.
+It helps determine why the returned structure was chosen.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure)
+
+```ts
+client.textStructure.findMessageStructure({ messages })
+```
+
+### Arguments [_arguments_text_structure.find_message_structure]
+
+#### Request (object) [_request_text_structure.find_message_structure]
+- **`messages` (string[])**: The list of messages you want to analyze.
+- **`column_names` (Optional, string \| string[])**: If the format is `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+- **`delimiter` (Optional, string)**: If the format is `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+- **`ecs_compatibility` (Optional, Enum("disabled" \| "v1"))**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them.
+- **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+- **`format` (Optional, Enum("delimited" \| "ndjson" \| "semi_structured_text" \| "xml"))**: The high level structure of the text.
+By default, the API chooses the format.
+In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+- **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
+If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
+If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+- **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
+Only a single character is supported.
+If this parameter is not specified, the default value is a double quote (`"`).
+If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+- **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
+If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`.
+Otherwise, the default value is `false`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum amount of time that the structure analysis can take.
+If the analysis is still running when the timeout expires, it will be stopped.
+- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
+- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.
+Only a subset of Java time format letter groups are supported:
+
+* `a`
+* `d`
+* `dd`
+* `EEE`
+* `EEEE`
+* `H`
+* `HH`
+* `h`
+* `M`
+* `MM`
+* `MMM`
+* `MMMM`
+* `mm`
+* `ss`
+* `XX`
+* `XXX`
+* `yy`
+* `yyyy`
+* `zzz`
+
+Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided that they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
+Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
+
+If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
+
+## client.textStructure.findStructure [_text_structure.find_structure]
+Find the structure of a text file.
+The text file must contain data that is suitable to be ingested into Elasticsearch.
+
+This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
+Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.
+It must, however, be text; binary text formats are not currently supported.
+The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.
+
+The response from the API contains:
+
+* A couple of messages from the beginning of the text.
+* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
+* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
+* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
+
+All this information can be calculated by the structure finder with no guidance.
+However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure)
+
+```ts
+client.textStructure.findStructure({ ... })
+```
+
+### Arguments [_arguments_text_structure.find_structure]
+
+#### Request (object) [_request_text_structure.find_structure]
+- **`text_files` (Optional, TJsonDocument[])**
+- **`charset` (Optional, string)**: The text's character set.
+It must be a character set that is supported by the JVM that Elasticsearch uses.
+For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`.
+If this parameter is not specified, the structure finder chooses an appropriate character set.
+- **`column_names` (Optional, string)**: If you have set `format` to `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+- **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+- **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+Valid values are `disabled` and `v1`.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them.
+- **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen.
+- **`format` (Optional, string)**: The high level structure of the text.
+Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`.
+By default, the API chooses the format.
+In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+- **`grok_pattern` (Optional, string)**: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
+If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
+If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
+- **`has_header_row` (Optional, boolean)**: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text.
+If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows.
+- **`line_merge_size_limit` (Optional, number)**: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text.
+If you have extremely long messages, you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected.
+- **`lines_to_sample` (Optional, number)**: The number of lines to include in the structural analysis, starting from the beginning of the text.
+The minimum is 2.
+If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.
+
+NOTE: The number of lines and the variation of the lines affect the speed of the analysis.
+For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample.
+If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety.
+- **`quote` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
+Only a single character is supported.
+If this parameter is not specified, the default value is a double quote (`"`).
+If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
+- **`should_trim_fields` (Optional, boolean)**: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
+If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`.
+Otherwise, the default value is `false`.
+- **`timeout` (Optional, string \| -1 \| 0)**: The maximum amount of time that the structure analysis can take.
+If the analysis is still running when the timeout expires, it will be stopped.
+- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text.
+In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+
+If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+
+For structured text, if you specify this parameter, the field must exist within the text.
+
+If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+For structured text, it is not compulsory to have a timestamp in the text.
+- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text.
+
+Only a subset of Java time format letter groups are supported:
+
+* `a`
+* `d`
+* `dd`
+* `EEE`
+* `EEEE`
+* `H`
+* `HH`
+* `h`
+* `M`
+* `MM`
+* `MMM`
+* `MMMM`
+* `mm`
+* `ss`
+* `XX`
+* `XXX`
+* `yy`
+* `yyyy`
+* `zzz`
+
+Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided that they occur after `ss` and are separated from the `ss` by a `.`, `,`, or `:`.
+Spacing and punctuation are also permitted, with the exception of `?`, newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
+
+If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
+
+## client.textStructure.testGrokPattern [_text_structure.test_grok_pattern]
+Test a Grok pattern.
+Test a Grok pattern on one or more lines of text.
+The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern)
+
+```ts
+client.textStructure.testGrokPattern({ grok_pattern, text })
+```
+
+### Arguments [_arguments_text_structure.test_grok_pattern]
+
+#### Request (object) [_request_text_structure.test_grok_pattern]
+- **`grok_pattern` (string)**: The Grok pattern to run on the text.
+- **`text` (string[])**: The lines of text to run the Grok pattern on.
+- **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+Valid values are `disabled` and `v1`.
+
+## client.transform.deleteTransform [_transform.delete_transform]
+Delete a transform.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform)
+
+```ts
+client.transform.deleteTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.delete_transform]
+
+#### Request (object) [_request_transform.delete_transform]
+- **`transform_id` (string)**: Identifier for the transform.
+- **`force` (Optional, boolean)**: If this value is `false`, the transform must be stopped before it can be deleted. If `true`, the transform is
+deleted regardless of its current state.
+- **`delete_dest_index` (Optional, boolean)**: If this value is `true`, the destination index is deleted together with the transform. If `false`, the destination
+index will not be deleted.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.transform.getNodeStats [_transform.get_node_stats]
+Retrieves transform usage information for transform nodes.
+
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html)
+
+```ts
+client.transform.getNodeStats()
+```
+
+
+## client.transform.getTransform [_transform.get_transform]
+Get transforms.
+Get configuration information for transforms.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform)
+
+```ts
+client.transform.getTransform({ ... })
+```
+
+### Arguments [_arguments_transform.get_transform]
+
+#### Request (object) [_request_transform.get_transform]
+- **`transform_id` (Optional, string \| string[])**: Identifier for the transform. It can be a transform identifier or a
+wildcard expression. You can get information for all transforms by using
+`_all`, by specifying `*` as the `<transform_id>`, or by omitting the
+`<transform_id>`.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no transforms that match.
+2. Contains the `_all` string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+If this parameter is false, the request returns a 404 status code when
+there are no matches or only partial matches.
+- **`from` (Optional, number)**: Skips the specified number of transforms.
+- **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain.
+- **`exclude_generated` (Optional, boolean)**: Excludes fields that were automatically added when creating the
+transform. This allows the configuration to be in an acceptable format to
+be retrieved and then added to another cluster.
+
+## client.transform.getTransformStats [_transform.get_transform_stats]
+Get transform stats.
+
+Get usage information for transforms.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats)
+
+```ts
+client.transform.getTransformStats({ transform_id })
+```
+
+### Arguments [_arguments_transform.get_transform_stats]
+
+#### Request (object) [_request_transform.get_transform_stats]
+- **`transform_id` (string \| string[])**: Identifier for the transform. It can be a transform identifier or a
+wildcard expression. You can get information for all transforms by using
+`_all`, by specifying `*` as the `<transform_id>`, or by omitting the
+`<transform_id>`.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request:
+
+1. Contains wildcard expressions and there are no transforms that match.
+2. Contains the `_all` string or no identifiers and there are no matches.
+3. Contains wildcard expressions and there are only partial matches.
+
+If this parameter is false, the request returns a 404 status code when
+there are no matches or only partial matches.
+- **`from` (Optional, number)**: Skips the specified number of transforms.
+- **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain.
+- **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the stats.
+
+## client.transform.previewTransform [_transform.preview_transform]
+Preview a transform.
+Generates a preview of the results that you will get when you create a transform with the same configuration.
+
+It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also
+generates a list of mappings and settings for the destination index. These values are determined based on the field
+types of the source index and the transform aggregations.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform)
+
+```ts
+client.transform.previewTransform({ ... })
+```
+
+### Arguments [_arguments_transform.preview_transform]
+
+#### Request (object) [_request_transform.preview_transform]
+- **`transform_id` (Optional, string)**: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform
+configuration details in the request body.
+- **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform.
+- **`description` (Optional, string)**: Free text description of the transform. +- **`frequency` (Optional, string \| -1 \| 0)**: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +- **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. +These objects define the group by fields and the aggregation to reduce +the data. +- **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, use_point_in_time, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. +- **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. +- **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for +each unique key. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. + +## client.transform.putTransform [_transform.put_transform] +Create a transform. +Creates a transform. + +A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as +a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a +unique row per entity. + +You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If +you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in +the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values +in the latest object. + +You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and +`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the +transform remembers which roles the user that created it had at the time of creation and uses those same roles. If +those roles do not have the required privileges on the source and destination indices, the transform fails when it +attempts unauthorized operations. + +NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any +`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do +not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not +give users any privileges on `.data-frame-internal*` indices. 
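+
+As a hedged sketch of the pivot method (the transform ID, index names, and field names below are all hypothetical), a continuous pivot transform might be created like this:
+
+```ts
+await client.transform.putTransform({
+  transform_id: 'ecommerce-customer-spend',   // hypothetical ID
+  source: { index: 'ecommerce-orders' },      // hypothetical source index
+  dest: { index: 'ecommerce-customer-spend-dest' },
+  pivot: {
+    // The group_by fields define the entities (one row per customer).
+    group_by: {
+      customer_id: { terms: { field: 'customer_id' } }
+    },
+    // The aggregations reduce the data for each entity.
+    aggregations: {
+      'total_spend.sum': { sum: { field: 'order_total' } }
+    }
+  },
+  // sync makes the transform continuous; frequency sets the check interval.
+  sync: { time: { field: 'order_date', delay: '60s' } },
+  frequency: '5m'
+})
+```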
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform) + +```ts +client.transform.putTransform({ transform_id, dest, source }) +``` + +### Arguments [_arguments_transform.put_transform] + +#### Request (object) [_request_transform.put_transform] +- **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +- **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination for the transform. +- **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`description` (Optional, string)**: Free text description of the transform. +- **`frequency` (Optional, string \| -1 \| 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also +determines the retry interval in the event of transient failures while the transform is searching or indexing. +The minimum value is `1s` and the maximum is `1h`. +- **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. +- **`_meta` (Optional, Record)**: Defines optional transform metadata. +- **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields +and the aggregation to reduce the data. +- **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the +destination index. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, use_point_in_time, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. +- **`defer_validation` (Optional, boolean)**: When the transform is created, a series of validations occur to ensure its success. For example, there is a +check for the existence of the source indices and a check that the destination index is not part of the source +index pattern. You can use this parameter to skip the checks, for example when the source index does not exist +until after the transform is created. The validations are always run when you start the transform, however, with +the exception of privilege checks. +- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.transform.resetTransform [_transform.reset_transform] +Reset a transform. + +Before you can reset it, you must stop it; alternatively, use the `force` query parameter. +If the destination index was created by the transform, it is deleted. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform) + +```ts +client.transform.resetTransform({ transform_id }) +``` + +### Arguments [_arguments_transform.reset_transform] + +#### Request (object) [_request_transform.reset_transform] +- **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. 
It has a 64 character limit and must start and end with alphanumeric characters.
+- **`force` (Optional, boolean)**: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform
+must be stopped before it can be reset.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.transform.scheduleNowTransform [_transform.schedule_now_transform]
+Schedule a transform to start now.
+
+Instantly run a transform to process data.
+If you run this API, the transform will process the new data instantly,
+without waiting for the configured frequency interval. After the API is called,
+the transform will be processed again at `now + frequency` unless the API
+is called again in the meantime.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform)
+
+```ts
+client.transform.scheduleNowTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.schedule_now_transform]
+
+#### Request (object) [_request_transform.schedule_now_transform]
+- **`transform_id` (string)**: Identifier for the transform.
+- **`timeout` (Optional, string \| -1 \| 0)**: Controls the time to wait for the scheduling to take place.
+
+## client.transform.setUpgradeMode [_transform.set_upgrade_mode]
+Set `upgrade_mode` for transform indices.
+Sets a cluster-wide `upgrade_mode` setting that prepares transform
+indices for an upgrade.
+When upgrading your cluster, in some circumstances you must restart your
+nodes and reindex your transform indices. In those circumstances,
+there must be no transforms running. You can close the transforms,
+do the upgrade, then open all the transforms again. Alternatively,
+you can use this API to temporarily halt tasks associated with the transforms
+and prevent new transforms from opening. You can also use this API
+during upgrades that do not require you to reindex your transform
+indices, though stopping transforms is not a requirement in that case.
+You can see the current value for the `upgrade_mode` setting by using the get
+transform info API.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-set-upgrade-mode)
+
+```ts
+client.transform.setUpgradeMode({ ... })
+```
+
+### Arguments [_arguments_transform.set_upgrade_mode]
+
+#### Request (object) [_request_transform.set_upgrade_mode]
+- **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode`, which temporarily halts all
+transform tasks and prohibits new transform tasks from
+starting.
+- **`timeout` (Optional, string \| -1 \| 0)**: The time to wait for the request to be completed.
+
+## client.transform.startTransform [_transform.start_transform]
+Start a transform.
+
+When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is
+set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping
+definitions for the destination index from the source indices and the transform aggregations. If fields in the
+destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations),
+the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce
+mapping definitions; it uses dynamic mappings.
To use explicit mappings, create the destination index before you
+start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings
+in a pivot transform.
+
+When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you
+created the transform, they occur when you start the transform, with the exception of privilege checks. When
+Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the
+time of creation and uses those same roles. If those roles do not have the required privileges on the source and
+destination indices, the transform fails when it attempts unauthorized operations.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform)
+
+```ts
+client.transform.startTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.start_transform]
+
+#### Request (object) [_request_transform.start_transform]
+- **`transform_id` (string)**: Identifier for the transform.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+- **`from` (Optional, string)**: Restricts the set of transformed entities to those changed after this time. Relative times like `now-30d` are supported. Only applicable for continuous transforms.
+
+## client.transform.stopTransform [_transform.stop_transform]
+Stop transforms.
+Stops one or more transforms.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform)
+
+```ts
+client.transform.stopTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.stop_transform]
+
+#### Request (object) [_request_transform.stop_transform]
+- **`transform_id` (string)**: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression.
+To stop all transforms, use `_all` or `*` as the identifier.
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match;
+contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there
+are only partial matches.
+
+If it is true, the API returns a successful acknowledgement message when there are no matches. When there are
+only partial matches, the API stops the appropriate transforms.
+
+If it is false, the request returns a 404 status code when there are no matches or only partial matches.
+- **`force` (Optional, boolean)**: If it is true, the API forcefully stops the transforms.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the
+timeout expires, the request returns a timeout exception. However, the request continues processing and
+eventually moves the transform to a STOPPED state.
+- **`wait_for_checkpoint` (Optional, boolean)**: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false,
+the transform stops as soon as possible.
+- **`wait_for_completion` (Optional, boolean)**: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns
+immediately and the indexer is stopped asynchronously in the background.
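+
+For example, a minimal sketch (the transform ID is hypothetical) that stops a transform only after its in-flight checkpoint completes:
+
+```ts
+await client.transform.stopTransform({
+  transform_id: 'ecommerce-customer-spend', // hypothetical ID
+  wait_for_checkpoint: true,  // finish the current checkpoint before stopping
+  wait_for_completion: true,  // block until the indexer has fully stopped
+  timeout: '30s'              // applies because wait_for_completion is true
+})
+```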
+
+## client.transform.updateTransform [_transform.update_transform]
+Update a transform.
+Updates certain properties of a transform.
+
+All updated properties except `description` do not take effect until after the transform starts the next checkpoint,
+so that there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`
+privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When
+Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the
+time of update and runs with those privileges.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform)
+
+```ts
+client.transform.updateTransform({ transform_id })
+```
+
+### Arguments [_arguments_transform.update_transform]
+
+#### Request (object) [_request_transform.update_transform]
+- **`transform_id` (string)**: Identifier for the transform.
+- **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform.
+- **`description` (Optional, string)**: Free text description of the transform.
+- **`frequency` (Optional, string \| -1 \| 0)**: The interval between checks for changes in the source indices when the
+transform is running continuously. Also determines the retry interval in
+the event of transient failures while the transform is searching or
+indexing. The minimum value is 1s and the maximum is 1h.
+- **`_meta` (Optional, Record)**: Defines optional transform metadata.
+- **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform.
+- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, use_point_in_time, unattended })**: Defines optional transform settings.
+- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously.
+- **`retention_policy` (Optional, { time } \| null)**: Defines a retention policy for the transform. Data that meets the defined
+criteria is deleted from the destination index.
+- **`defer_validation` (Optional, boolean)**: When true, deferrable validations are not run. This behavior may be
+desired if the source index does not exist until after the transform is
+created.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the
+timeout expires, the request fails and returns an error.
+
+## client.transform.upgradeTransforms [_transform.upgrade_transforms]
+Upgrade all transforms.
+
+Transforms are compatible across minor versions and between supported major versions.
+However, over time, the format of transform configuration information may change.
+This API identifies transforms that have a legacy configuration format and upgrades them to the latest version.
+It also cleans up the internal data structures that store the transform state and checkpoints.
+The upgrade does not affect the source and destination indices.
+The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.
+
+If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue.
+Resolve the issue, then run the process again.
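+For example, a minimal sketch of a dry run that reports what would be upgraded without changing anything:
+
+```ts
+// With dry_run set to true, the request only checks for updates; nothing is run.
+const summary = await client.transform.upgradeTransforms({ dry_run: true })
+```
+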
+A summary is returned when the upgrade is finished.
+
+To ensure continuous transforms remain running during a major version upgrade of the cluster (for example, from 7.16 to 8.0), it is recommended to upgrade transforms before upgrading the cluster.
+You may want to take a recent cluster backup prior to the upgrade.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms)
+
+```ts
+client.transform.upgradeTransforms({ ... })
+```
+
+### Arguments [_arguments_transform.upgrade_transforms]
+
+#### Request (object) [_request_transform.upgrade_transforms]
+- **`dry_run` (Optional, boolean)**: When `true`, the request checks for updates but does not run them.
+- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and
+returns an error.
+
+## client.watcher.ackWatch [_watcher.ack_watch]
+Acknowledge a watch.
+Acknowledging a watch enables you to manually throttle the execution of the watch's actions.
+
+The acknowledgement state of an action is stored in the `status.actions.<id>.ack.state` structure.
+
+IMPORTANT: If the specified watch is currently being executed, this API will return an error.
+The reason for this behavior is to prevent overwriting the watch status from a watch execution.
+
+Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`.
+This happens when the condition of the watch is not met (the condition evaluates to false).
+To demonstrate how throttling works in practice and how it can be configured for individual actions within a watch, refer to the external documentation.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch)
+
+```ts
+client.watcher.ackWatch({ watch_id })
+```
+
+### Arguments [_arguments_watcher.ack_watch]
+
+#### Request (object) [_request_watcher.ack_watch]
+- **`watch_id` (string)**: The watch identifier.
+- **`action_id` (Optional, string \| string[])**: A list of the action identifiers to acknowledge.
+If you omit this parameter, all of the actions of the watch are acknowledged.
+
+## client.watcher.activateWatch [_watcher.activate_watch]
+Activate a watch.
+A watch can be either active or inactive.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch)
+
+```ts
+client.watcher.activateWatch({ watch_id })
+```
+
+### Arguments [_arguments_watcher.activate_watch]
+
+#### Request (object) [_request_watcher.activate_watch]
+- **`watch_id` (string)**: The watch identifier.
+
+## client.watcher.deactivateWatch [_watcher.deactivate_watch]
+Deactivate a watch.
+A watch can be either active or inactive.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch)
+
+```ts
+client.watcher.deactivateWatch({ watch_id })
+```
+
+### Arguments [_arguments_watcher.deactivate_watch]
+
+#### Request (object) [_request_watcher.deactivate_watch]
+- **`watch_id` (string)**: The watch identifier.
+
+## client.watcher.deleteWatch [_watcher.delete_watch]
+Delete a watch.
+When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again.
+
+Deleting a watch does not delete any watch execution records related to this watch from the watch history.
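+
+A minimal sketch (the watch ID is hypothetical):
+
+```ts
+// Removes the watch document from the .watches index; records already
+// written to the watch history are left in place.
+await client.watcher.deleteWatch({ id: 'cluster_health_watch' })
+```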
+
+IMPORTANT: Deleting a watch must be done by using only this API.
+Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API.
+When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch)
+
+```ts
+client.watcher.deleteWatch({ id })
+```
+
+### Arguments [_arguments_watcher.delete_watch]
+
+#### Request (object) [_request_watcher.delete_watch]
+- **`id` (string)**: The watch identifier.
+
+## client.watcher.executeWatch [_watcher.execute_watch]
+Run a watch.
+This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.
+
+For testing and debugging purposes, you also have fine-grained control over how the watch runs.
+You can run the watch without running all of its actions or alternatively by simulating them.
+You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.
+
+You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.
+This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.
+
+When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.
+If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch.
+
+When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
+Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch)
+
+```ts
+client.watcher.executeWatch({ ... })
+```
+
+### Arguments [_arguments_watcher.execute_watch]
+
+#### Request (object) [_request_watcher.execute_watch]
+- **`id` (Optional, string)**: The watch identifier.
+- **`action_modes` (Optional, Record)**: Determines how to handle the watch actions as part of the watch execution.
+- **`alternative_input` (Optional, Record)**: When present, the watch uses this object as a payload instead of executing its own input.
+- **`ignore_condition` (Optional, boolean)**: When set to `true`, the watch execution uses the `always` condition. This can also be specified as an HTTP parameter.
+- **`record_execution` (Optional, boolean)**: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time.
+In addition, the status of the watch is updated, possibly throttling subsequent runs.
+This can also be specified as an HTTP parameter.
+- **`simulated_actions` (Optional, { actions, all, use_all })**
+- **`trigger_data` (Optional, { scheduled_time, triggered_time })**: This structure is parsed as the data of the trigger event that will be used during the watch execution.
+- **`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })**: When present, this watch is used instead of the one specified in the request.
+This watch is not persisted to the index and `record_execution` cannot be set.
+- **`debug` (Optional, boolean)**: Defines whether the watch runs in debug mode.
+
+## client.watcher.getSettings [_watcher.get_settings]
+Get Watcher index settings.
+Get settings for the Watcher internal index (`.watches`).
+Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings)
+
+```ts
+client.watcher.getSettings({ ... })
+```
+
+### Arguments [_arguments_watcher.get_settings]
+
+#### Request (object) [_request_watcher.get_settings]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.watcher.getWatch [_watcher.get_watch]
+Get a watch.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch)
+
+```ts
+client.watcher.getWatch({ id })
+```
+
+### Arguments [_arguments_watcher.get_watch]
+
+#### Request (object) [_request_watcher.get_watch]
+- **`id` (string)**: The watch identifier.
+
+## client.watcher.putWatch [_watcher.put_watch]
+Create or update a watch.
+When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine.
+Typically for the `schedule` trigger, the scheduler is the trigger engine.
+
+IMPORTANT: You must use Kibana or this API to create a watch.
+Do not add a watch directly to the `.watches` index by using the Elasticsearch index API.
+If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index.
+
+When you add a watch, you can also define its initial active state by setting the *active* parameter.
+
+When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.
+If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch)
+
+```ts
+client.watcher.putWatch({ id })
+```
+
+### Arguments [_arguments_watcher.put_watch]
+
+#### Request (object) [_request_watcher.put_watch]
+- **`id` (string)**: The identifier for the watch.
+- **`actions` (Optional, Record)**: The list of actions that will be run if the condition matches.
+- **`condition` (Optional, { always, array_compare, compare, never, script })**: The condition that defines if the actions should be run.
+- **`input` (Optional, { chain, http, search, simple })**: The input that loads the data for the watch.
+- **`metadata` (Optional, Record)**: Metadata JSON that will be copied into the history entries.
+- **`throttle_period` (Optional, string \| -1 \| 0)**: The minimum time between actions being run.
+The default is 5 seconds.
+This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`.
+If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request.
+- **`throttle_period_in_millis` (Optional, Unit)**: Minimum time in milliseconds between actions being run. Defaults to 5000.
If both this value and the `throttle_period` parameter are specified, Watcher uses the last parameter included in the request.
+- **`transform` (Optional, { chain, script, search })**: The transform that processes the watch payload to prepare it for the watch actions.
+- **`trigger` (Optional, { schedule })**: The trigger that defines when the watch should run.
+- **`active` (Optional, boolean)**: The initial state of the watch.
+The default value is `true`, which means the watch is active by default.
+- **`if_primary_term` (Optional, number)**: Only update the watch if the last operation that changed the watch has the specified primary term.
+- **`if_seq_no` (Optional, number)**: Only update the watch if the last operation that changed the watch has the specified sequence number.
+- **`version` (Optional, number)**: Explicit version number for concurrency control.
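+
+As an illustrative sketch, a watch that checks cluster health every 10 seconds and logs a message when the status is red could be registered like this (the watch ID, schedule, and action are hypothetical):
+
+```ts
+await client.watcher.putWatch({
+  id: 'cluster_health_watch',
+  trigger: { schedule: { interval: '10s' } },
+  input: {
+    http: {
+      request: { host: 'localhost', port: 9200, path: '/_cluster/health' }
+    }
+  },
+  condition: {
+    compare: { 'ctx.payload.status': { eq: 'red' } }
+  },
+  actions: {
+    log_health: { logging: { text: 'Cluster health is RED' } }
+  }
+})
+```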
+## client.watcher.queryWatches [_watcher.query_watches]
+Query watches.
+Get all registered watches in a paginated manner and optionally filter watches by a query.
+
+Note that only the `_id` and `metadata.*` fields are queryable or sortable.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches)
+
+```ts
+client.watcher.queryWatches({ ... })
+```
+
+### Arguments [_arguments_watcher.query_watches]
+
+#### Request (object) [_request_watcher.query_watches]
+- **`from` (Optional, number)**: The offset from the first result to fetch.
+It must be non-negative.
+- **`size` (Optional, number)**: The number of hits to return.
+It must be non-negative.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query that filters the watches to be returned.
+- **`sort` (Optional, string \| { _score, _doc, _geo_distance, _script } \| string \| { _score, _doc, _geo_distance, _script }[])**: One or more fields used to sort the search results.
+- **`search_after` (Optional, number \| number \| string \| boolean \| null[])**: Retrieve the next page of hits using a set of sort values from the previous page.
+
+## client.watcher.start [_watcher.start]
+Start the watch service.
+Start the Watcher service if it is not already running.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start)
+
+```ts
+client.watcher.start({ ... })
+```
+
+### Arguments [_arguments_watcher.start]
+
+#### Request (object) [_request_watcher.start]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node.
+
+## client.watcher.stats [_watcher.stats]
+Get Watcher statistics.
+This API always returns basic metrics.
+You can retrieve more metrics by using the `metric` parameter.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats)
+
+```ts
+client.watcher.stats({ ... })
+```
+
+### Arguments [_arguments_watcher.stats]
+
+#### Request (object) [_request_watcher.stats]
+- **`metric` (Optional, Enum("_all" \| "queued_watches" \| "current_watches" \| "pending_watches") \| Enum("_all" \| "queued_watches" \| "current_watches" \| "pending_watches")[])**: Defines which additional metrics are included in the response.
+- **`emit_stacktraces` (Optional, boolean)**: Defines whether stack traces are generated for each watch that is running.
+
+## client.watcher.stop [_watcher.stop]
+Stop the watch service.
+Stop the Watcher service if it is running.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop)
+
+```ts
+client.watcher.stop({ ... })
+```
+
+### Arguments [_arguments_watcher.stop]
+
+#### Request (object) [_request_watcher.stop]
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never time out, set it to `-1`.
+
+## client.watcher.updateSettings [_watcher.update_settings]
+Update Watcher index settings.
+Update settings for the Watcher internal index (`.watches`).
+Only a subset of settings can be modified.
+This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`,
+`index.routing.allocation.include.*` and `index.routing.allocation.require.*`.
+Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the
+Watcher shards must always be in the `data_content` tier.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings)
+
+```ts
+client.watcher.updateSettings({ ... })
+```
+
+### Arguments [_arguments_watcher.update_settings]
+
+#### Request (object) [_request_watcher.update_settings]
+- **`index.auto_expand_replicas` (Optional, string)**
+- **`index.number_of_replicas` (Optional, number)**
+- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string \| -1 \| 0)**: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+## client.xpack.info [_xpack.info]
+Get information.
+The information provided by the API includes:
+
+* Build information including the build number and timestamp.
+* License information about the currently installed license.
+* Feature information for the features that are currently enabled and available under the current license.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info)
+
+```ts
+client.xpack.info({ ... })
+```
+
+### Arguments [_arguments_xpack.info]
+
+#### Request (object) [_request_xpack.info]
+- **`categories` (Optional, Enum("build" \| "features" \| "license")[])**: A list of the information categories to include in the response.
+For example, `build,license,features`.
+- **`accept_enterprise` (Optional, boolean)**: If this parameter is used, it must be set to `true`.
+- **`human` (Optional, boolean)**: Defines whether additional human-readable information is included in the response.
+In particular, it adds descriptions and a tag line.
+
+## client.xpack.usage [_xpack.usage]
+Get usage information.
+Get information about the features that are currently enabled and available under the current license. +The API also provides some usage statistics. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack) + +```ts +client.xpack.usage({ ... }) +``` + +### Arguments [_arguments_xpack.usage] + +#### Request (object) [_request_xpack.usage] +- **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + diff --git a/docs/reference/as_stream_examples.md b/docs/reference/as_stream_examples.md new file mode 100644 index 000000000..6e678bd02 --- /dev/null +++ b/docs/reference/as_stream_examples.md @@ -0,0 +1,103 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/as_stream_examples.html +--- + +# asStream [as_stream_examples] + +Instead of getting the parsed body back, you will get the raw Node.js stream of data. + +```js +'use strict' + +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) + +async function run () { + const bulkResponse = await client.bulk({ + refresh: true, + operations: [ + // operation to perform + { index: { _index: 'game-of-thrones' } }, + // the document to index + { + character: 'Ned Stark', + quote: 'Winter is coming.' + }, + + { index: { _index: 'game-of-thrones' } }, + { + character: 'Daenerys Targaryen', + quote: 'I am the blood of the dragon.' + }, + + { index: { _index: 'game-of-thrones' } }, + { + character: 'Tyrion Lannister', + quote: 'A mind needs books like a sword needs a whetstone.' + } + ] + }) + + if (bulkResponse.errors) { + console.log(bulkResponse) + process.exit(1) + } + + // Let's search! + const result = await client.search({ + index: 'game-of-thrones', + query: { + match: { + quote: 'winter' + } + } + }, { + asStream: true + }) + + let payload = '' + result.setEncoding('utf8') + for await (const chunk of result) { + payload += chunk + } + console.log(JSON.parse(payload)) +} + +run().catch(console.log) +``` + +::::{tip} +This can be useful if you need to pipe the {{es}}'s response to a proxy, or send it directly to another source. +:::: + + +```js +'use strict' + +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) +const fastify = require('fastify')() + +fastify.post('/search/:index', async (req, reply) => { + const { body, statusCode, headers } = await client.search({ + index: req.params.index, + ...req.body + }, { + asStream: true, + meta: true + }) + + reply.code(statusCode).headers(headers) + return body +}) + +fastify.listen(3000) +``` + diff --git a/docs/reference/basic-config.md b/docs/reference/basic-config.md new file mode 100644 index 000000000..db4c5499c --- /dev/null +++ b/docs/reference/basic-config.md @@ -0,0 +1,381 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/basic-config.html +--- + +# Basic configuration [basic-config] + +This page explains the basic configuration options for the JavaScript client. 
+ +```js +const { Client } = require('@elastic/elasticsearch') + +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' }, + maxRetries: 5, + sniffOnStart: true +}) +``` + +### `node` or `nodes` + +The {{es}} endpoint to use. It can be a single string or an array of strings: + +```js +node: 'http://localhost:9200/' +``` + +```js +nodes: ['http://localhost:9200/', 'http://localhost:9201/'] +``` + +Or it can be an object (or an array of objects) that represents the node: + +```js +node: { + url: new URL('http://localhost:9200/'), + tls: 'tls options', + agent: 'http agent options', + id: 'custom node id', + headers: { 'custom': 'headers' }, + roles: { + master: true, + data: true, + ingest: true, + ml: false + } +} +``` + +--- + +### `auth` + +Default: `null` + +Your authentication data. You can use both basic authentication and [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). + See [Authentication](/reference/connecting.md#authentication) for more details. + +Basic authentication: + +```js +auth: { + username: 'elastic', + password: 'changeme' +} +``` + +[ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication: + +```js +auth: { + apiKey: 'base64EncodedKey' +} +``` + +Bearer authentication, useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh: + +```js +auth: { + bearer: 'token' +} +``` + +### `maxRetries` + +Type: `number`
+Default: `3` + +Max number of retries for each request. + +### `requestTimeout` + +Type: `number`
+Default: No timeout
+
+Max request timeout in milliseconds for each request.
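+
+For example, assuming a node at `http://localhost:9200`, a client that retries failed requests a little more aggressively and gives up on slow requests after 30 seconds could be configured like this (the values are illustrative):
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  node: 'http://localhost:9200/',
+  maxRetries: 5, // retry each failed request up to 5 times
+  requestTimeout: 30000 // fail requests that take longer than 30 seconds
+})
+```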
+### `pingTimeout`
+
+Type: `number`<br>
+Default: `3000`
+
+Max ping request timeout in milliseconds for each request.
+
+### `sniffInterval`
+
+Type: `number, boolean`<br>
+Default: `false` + +Perform a sniff operation every `n` milliseconds. + +:::{tip} +Sniffing might not be the best solution. Before using the various `sniff` options, review this [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). +::: + +### `sniffOnStart` + +Type: `boolean`
+Default: `false` + +Perform a sniff once the client is started. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). + +### `sniffEndpoint` + +Type: `string`
+Default: `'_nodes/_all/http'` + +Endpoint to ping during a sniff. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). + +### `sniffOnConnectionFault` + +Type: `boolean`
+Default: `false`
+
+Perform a sniff on connection fault. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how).
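+
+If you do decide to use sniffing, a client that sniffs at startup, on connection faults, and on a fixed interval might look like this sketch (the interval is illustrative):
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  node: 'http://localhost:9200/',
+  sniffOnStart: true, // discover the cluster topology at startup
+  sniffOnConnectionFault: true, // re-discover it when a connection fails
+  sniffInterval: 60000 // and refresh it every 60 seconds
+})
+```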
+### `resurrectStrategy`
+
+Type: `string`<br>
+Default: `'ping'`
+
+Configure the node resurrection strategy.<br>
+Options: `'ping'`, `'optimistic'`, `'none'` + +### `suggestCompression` + +Type: `boolean`
+Default: `false` + +Adds an `accept-encoding` header to every request. + +### `compression` + +Type: `string, boolean`
+Default: `false` + +Enables gzip request body compression.
+Options: `'gzip'`, `false` + +### `tls` + +Type: `http.SecureContextOptions`
+Default: `null`
+
+The [TLS configuration](https://nodejs.org/api/tls.html) options.
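+
+For example, a minimal sketch that trusts the CA certificate generated by {{es}} (the file path is illustrative):
+
+```js
+const fs = require('fs')
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  node: 'https://localhost:9200/',
+  auth: { apiKey: 'base64EncodedKey' },
+  tls: {
+    // trust the CA certificate that signed the node's certificate
+    ca: fs.readFileSync('./http_ca.crt')
+  }
+})
+```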
+### `proxy`
+
+Type: `string, URL`<br>
+Default: `null`
+
+If you are using an HTTP(S) proxy, you can set its URL here. The client will automatically handle the connection to it.
+
+```js
+const client = new Client({
+  node: 'http://localhost:9200/',
+  proxy: 'http://localhost:8080/'
+})
+
+const client = new Client({
+  node: 'http://localhost:9200/',
+  proxy: 'http://user:pwd@localhost:8080/'
+})
+```
+
+### `agent` [agent-config]
+
+Type: `http.AgentOptions, undici.PoolOptions, function, false`<br>
+Default: `null` + +If using the default `UndiciConnection` from `@elastic/transport`, this value can be: + +- an [Undici `PoolOptions` object](https://undici.nodejs.org/#/docs/api/Pool?id=parameter-pooloptions) +- a function that receives all connection-related options and returns an [Undici `Agent`](https://undici.nodejs.org/#/docs/api/Agent.md) instance (or any other object that follows [Undici's `Dispatch.request()`](https://undici.nodejs.org/#/docs/api/Dispatcher?id=dispatcherrequestoptions-callback) conventions) + +If using the legacy `HttpConnection` from `@elastic/transport`, this value can be: + +- [the options object passed to an `http.Agent`](https://nodejs.org/api/http.html#new-agentoptions) +- a function that returns an `http.Agent` (and thus also an [`https.Agent`](https://nodejs.org/api/https.html#class-httpsagent), or any implementation that follows the same conventions, like [`hpagent`](https://www.npmjs.com/package/hpagent)) +- `false` to disable all agent usage, including the `keep-alive` feature + +```js +const client = new Client({ + node: 'http://localhost:9200/', + agent: { agent: 'options' } +}) + +const client = new Client({ + node: 'http://localhost:9200/', + // the function takes as parameter the option + // object passed to the Connection constructor + agent: (opts) => new CustomAgent() +}) + +const client = new Client({ + node: 'http://localhost:9200/', + // Disable agent and keep-alive + agent: false +}) +``` + +::::{warning} +If you have set [the `agent` option](/reference/basic-config.md#agent-config) on your client instance to a function and are using `UndiciConnection`—the default [`Connection`](/reference/advanced-config.md#_connection) value starting in 8.0—all `caFingerprint` and `tls` options will be ignored. It is your responsibility to ensure that your custom agent will properly verify HTTPS connections. +:::: + +### `nodeFilter` + +Type: `function` + +Filter that indicates whether a node should be used for a request. Default function definition: + +```js +function defaultNodeFilter (conn) { + if (conn.roles != null) { + if ( + // avoid master-only nodes + conn.roles.master && + !conn.roles.data && + !conn.roles.ingest && + !conn.roles.ml + ) return false + } + return true +} +``` + +### `nodeSelector` + +Type: `function`
+Default: `'round-robin'` + +Custom selection strategy.
+Options: `'round-robin'`, `'random'`, custom function + +Custom function example: + +```js +function nodeSelector (connections) { + const index = calculateIndex() + return connections[index] +} +``` + +### `generateRequestId` + +Type: `function`
+
+Function to generate the request ID for every request. It takes two parameters: the request parameters and options. By default, it generates an incremental integer for every request.
+
+Custom function example:
+
+```js
+function generateRequestId (params, options) {
+  // your id generation logic
+  // must be synchronous
+  return 'id'
+}
+```
+
+### `name`
+
+Type: `string, symbol`<br>
+Default: `elasticsearch-js` + +The name to identify the client instance in the events. + +### `opaqueIdPrefix` + +Type: `string`
+Default: `null`
+
+A string that will be used to prefix any `X-Opaque-Id` header.
+See [`X-Opaque-Id` support](/reference/observability.md#_x_opaque_id_support) for more details.
+
+### `headers`
+
+Type: `object`<br>
+Default: `{}`
+
+A set of custom headers to send in every request.
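+
+For example, a sketch that sends a custom header with every request and prefixes any `X-Opaque-Id` header (the header name and prefix are illustrative):
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  node: 'http://localhost:9200/',
+  headers: { 'x-custom-app': 'my-app' }, // sent with every request
+  opaqueIdPrefix: 'my-app::' // prepended to any X-Opaque-Id header
+})
+```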
+### `context`
+
+Type: `object`<br>
+Default: `null`
+
+A custom object that you can use for observability in your events. It will be merged with the API-level `context` option.
+
+### `enableMetaHeader`
+
+Type: `boolean`<br>
+Default: `true`
+
+If `true`, adds a header named `'x-elastic-client-meta'`, containing some minimal telemetry data, such as the client and platform version.
+
+### `cloud`
+
+Type: `object`<br>
+Default: `null`
+
+Custom configuration for connecting to [Elastic Cloud](https://cloud.elastic.co). See [Authentication](/reference/connecting.md) for more details.
+
+Cloud configuration example:
+
+```js
+const client = new Client({
+  cloud: {
+    id: ''
+  },
+  auth: {
+    username: 'elastic',
+    password: 'changeme'
+  }
+})
+```
+
+### `disablePrototypePoisoningProtection`
+
+Default: `true`
+
+`boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. For more information, refer to [Square Brackets are the Enemy](https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08). If needed, you can enable full prototype poisoning protection by setting this option to `false`, or enable only one of the two checks by setting it to `'proto'` or `'constructor'`. For performance reasons, protection is disabled by default. To learn more, refer to the [`secure-json-parse` documentation](https://github.com/fastify/secure-json-parse).
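+
+For example, to re-enable full protection when parsing responses from sources you do not fully trust:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  node: 'http://localhost:9200/',
+  // `false` enables both the 'proto' and 'constructor' checks
+  disablePrototypePoisoningProtection: false
+})
+```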
+### `caFingerprint`
+
+Type: `string`<br>
+Default: `null`
+
+If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints.
+
+### `maxResponseSize`
+
+Type: `number`<br>
+Default: `null` + +When configured, `maxResponseSize` verifies that the uncompressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`. + +### `maxCompressedResponseSize` + +Type: `number`
+Default: `null`
+
+When configured, `maxCompressedResponseSize` verifies that the compressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxCompressedResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`.
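+
+For example, a sketch that cancels requests whose responses exceed 10 MB uncompressed or 1 MB compressed (the limits are illustrative):
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  node: 'http://localhost:9200/',
+  maxResponseSize: 10 * 1024 * 1024, // cancel if the uncompressed body exceeds 10 MB
+  maxCompressedResponseSize: 1024 * 1024 // cancel if the compressed body exceeds 1 MB
+})
+```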
+### `redaction`
+
+Type: `object`<br>
+Default: A configuration that will replace known sources of sensitive data in `Error` metadata
+
+Options for how to redact potentially sensitive data from metadata attached to `Error` objects.
+
+::::{note}
+[Read about redaction](/reference/advanced-config.md#redaction) for more details.
+::::
+
+### `serverMode`
+
+Type: `string`<br>
+Default: `"stack"`
+
+Setting to `"stack"` sets defaults assuming a traditional (non-serverless) {{es}} instance. Setting to `"serverless"` sets defaults to work more seamlessly with [Elastic Cloud Serverless](https://www.elastic.co/guide/en/serverless/current/intro.html), like enabling compression and disabling features that assume the possibility of multiple {{es}} nodes.
diff --git a/docs/reference/bulk_examples.md b/docs/reference/bulk_examples.md
new file mode 100644
index 000000000..51b53e6c7
--- /dev/null
+++ b/docs/reference/bulk_examples.md
@@ -0,0 +1,97 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/bulk_examples.html
+---
+
+# Bulk [bulk_examples]
+
+With the [`bulk` API](/reference/api-reference.md#_bulk), you can perform multiple index/delete operations in a single API call. The `bulk` API significantly increases indexing speed.
+
+::::{note}
+You can also use the [bulk helper](/reference/client-helpers.md#bulk-helper).
+::::
+
+
+```js
+'use strict'
+
+require('array.prototype.flatmap').shim()
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  await client.indices.create({
+    index: 'tweets',
+    mappings: {
+      properties: {
+        id: { type: 'integer' },
+        text: { type: 'text' },
+        user: { type: 'keyword' },
+        time: { type: 'date' }
+      }
+    }
+  }, { ignore: [400] })
+
+  const dataset = [{
+    id: 1,
+    text: 'If I fall, don\'t bring me back.',
+    user: 'jon',
+    time: new Date()
+  }, {
+    id: 2,
+    text: 'Winter is coming',
+    user: 'ned',
+    time: new Date()
+  }, {
+    id: 3,
+    text: 'A Lannister always pays his debts.',
+    user: 'tyrion',
+    time: new Date()
+  }, {
+    id: 4,
+    text: 'I am the blood of the dragon.',
+    user: 'daenerys',
+    time: new Date()
+  }, {
+    id: 5, // change this value to a string to see the bulk response with errors
+    text: 'A girl is Arya Stark of Winterfell. And I\'m going home.',
+    user: 'arya',
+    time: new Date()
+  }]
+
+  const operations = dataset.flatMap(doc => [{ index: { _index: 'tweets' } }, doc])
+
+  const bulkResponse = await client.bulk({ refresh: true, operations })
+
+  if (bulkResponse.errors) {
+    const erroredDocuments = []
+    // The items array has the same order as the dataset we just indexed.
+    // The presence of the `error` key indicates that the operation
+    // that we did for the document has failed.
+    bulkResponse.items.forEach((action, i) => {
+      const operation = Object.keys(action)[0]
+      if (action[operation].error) {
+        erroredDocuments.push({
+          // If the status is 429 it means that you can retry the document,
+          // otherwise it's very likely a mapping error, and you should
+          // fix the document before trying it again.
+          status: action[operation].status,
+          error: action[operation].error,
+          operation: operations[i * 2],
+          document: operations[i * 2 + 1]
+        })
+      }
+    })
+    console.log(erroredDocuments)
+  }
+
+  const count = await client.count({ index: 'tweets' })
+  console.log(count)
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/reference/child.md b/docs/reference/child.md
new file mode 100644
index 000000000..bbebed573
--- /dev/null
+++ b/docs/reference/child.md
@@ -0,0 +1,34 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child.html
+---
+
+# Creating a child client [child]
+
+There are some use cases where you may need multiple instances of the client.
You can easily do that by calling `new Client()` as many times as you need, but you will lose all the benefits of using one single client, such as long-lived connections and connection pool handling. To avoid this problem, the client offers a `child` API, which returns a new client instance that shares the connection pool with the parent client.
+
+::::{note}
+The event emitter is shared between the parent and the child(ren). If you extend the parent client, the child client will have the same extensions, while if the child client adds an extension, the parent client will not be extended.
+::::
+
+
+You can pass to the `child` every client option you would pass to a normal client, except the connection pool-specific options (`ssl`, `agent`, `pingTimeout`, `Connection`, and `resurrectStrategy`).
+
+::::{warning}
+If you call `close` in any of the parent/child clients, every client will be closed.
+::::
+
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+const child = client.child({
+  headers: { 'x-foo': 'bar' },
+})
+
+client.info().then(console.log, console.log)
+child.info().then(console.log, console.log)
+```
diff --git a/docs/reference/client-helpers.md b/docs/reference/client-helpers.md
new file mode 100644
index 000000000..27b251242
--- /dev/null
+++ b/docs/reference/client-helpers.md
@@ -0,0 +1,648 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-helpers.html
+---
+
+# Client helpers [client-helpers]
+
+The client comes with a handy collection of helpers to give you a more comfortable experience with some APIs.
+
+::::{warning}
+The client helpers are experimental, and the API may change in the next minor releases. The helpers will not work in any Node.js version lower than 10.
+::::
+
+## Bulk helper [bulk-helper]
+
+Added in `v7.7.0`
+
+Running bulk requests can be complex due to the shape of the API; this helper aims to provide a nicer developer experience around the Bulk API.
+
+### Usage [_usage_3]
+
+```js
+const { createReadStream } = require('fs')
+const split = require('split2')
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+const result = await client.helpers.bulk({
+  datasource: createReadStream('./dataset.ndjson').pipe(split()),
+  onDocument (doc) {
+    return {
+      index: { _index: 'my-index' }
+    }
+  }
+})
+
+console.log(result)
+// {
+//   total: number,
+//   failed: number,
+//   retry: number,
+//   successful: number,
+//   time: number,
+//   bytes: number,
+//   aborted: boolean
+// }
+```
+
+To create a new instance of the Bulk helper, access it as shown in the example above. The configuration options are:
+
+`datasource`
+: An array, async generator or a readable stream with the data you need to index/create/update/delete. It can be an array of strings or objects, but also a stream of JSON strings or JavaScript objects.
+  If it is a stream, we recommend using the [`split2`](https://www.npmjs.com/package/split2) package, which splits the stream on newline delimiters.
+  This parameter is mandatory.
+
+    ```js
+    const { createReadStream } = require('fs')
+    const split = require('split2')
+    const b = client.helpers.bulk({
+      // if you just use split(), the data will be used as an array of strings
+      datasource: createReadStream('./dataset.ndjson').pipe(split()),
+      // if you need to manipulate the data, you can pass JSON.parse to split
+      datasource: createReadStream('./dataset.ndjson').pipe(split(JSON.parse))
+    })
+    ```
+
+`onDocument`
+: A function that is called for each document of the datasource. Inside this function you can manipulate the document and you must return the operation you want to execute with the document. Look at the [Bulk API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to see the supported operations.
+  This parameter is mandatory.
+
+    ```js
+    const b = client.helpers.bulk({
+      onDocument (doc) {
+        return {
+          index: { _index: 'my-index' }
+        }
+      }
+    })
+    ```
+
+`onDrop`
+: A function that is called every time a document can’t be indexed and has reached the maximum number of retries.
+
+    ```js
+    const b = client.helpers.bulk({
+      onDrop (doc) {
+        console.log(doc)
+      }
+    })
+    ```
+
+`onSuccess`
+: A function that is called for each successful operation in the bulk request, which includes the result from {{es}} along with the original document that was sent, or `null` for delete operations.
+
+    ```js
+    const b = client.helpers.bulk({
+      onSuccess ({ result, document }) {
+        console.log(`SUCCESS: Document ${result.index._id} indexed to ${result.index._index}`)
+      }
+    })
+    ```
+
+`flushBytes`
+: The size of the bulk body, in bytes, to reach before sending it. The default is 5MB.
+  *Default:* `5000000`
+
+    ```js
+    const b = client.helpers.bulk({
+      flushBytes: 1000000
+    })
+    ```
+
+`flushInterval`
+: How much time (in milliseconds) the helper waits before flushing the body from the last document read.
+  *Default:* `30000`
+
+    ```js
+    const b = client.helpers.bulk({
+      flushInterval: 30000
+    })
+    ```
+
+`concurrency`
+: How many requests are executed at the same time.
+  *Default:* `5`
+
+    ```js
+    const b = client.helpers.bulk({
+      concurrency: 10
+    })
+    ```
+
+`retries`
+: How many times a document is retried before the `onDrop` callback is called.
+  *Default:* Client max retries.
+
+    ```js
+    const b = client.helpers.bulk({
+      retries: 3
+    })
+    ```
+
+`wait`
+: How much time to wait before retrying, in milliseconds.
+  *Default:* `5000`.
+
+    ```js
+    const b = client.helpers.bulk({
+      wait: 3000
+    })
+    ```
+
+`refreshOnCompletion`
+: If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices.
+  *Default:* `false`.
+
+    ```js
+    const b = client.helpers.bulk({
+      refreshOnCompletion: true,
+      // or
+      refreshOnCompletion: 'index-name'
+    })
+    ```
+
+### Supported operations [_supported_operations]
+
+#### Index [_index_2]
+
+```js
+client.helpers.bulk({
+  datasource: myDatasource,
+  onDocument (doc) {
+    return {
+      index: { _index: 'my-index' }
+    }
+  }
+})
+```
+
+#### Create [_create_4]
+
+```js
+client.helpers.bulk({
+  datasource: myDatasource,
+  onDocument (doc) {
+    return {
+      create: { _index: 'my-index', _id: doc.id }
+    }
+  }
+})
+```
+
+#### Update [_update_3]
+
+```js
+client.helpers.bulk({
+  datasource: myDatasource,
+  onDocument (doc) {
+    // Note that the update operation requires you to return
+    // an array, where the first element is the action, while
+    // the second is the document options
+    return [
+      { update: { _index: 'my-index', _id: doc.id } },
+      { doc_as_upsert: true }
+    ]
+  }
+})
+```
+
+#### Delete [_delete_10]
+
+```js
+client.helpers.bulk({
+  datasource: myDatasource,
+  onDocument (doc) {
+    return {
+      delete: { _index: 'my-index', _id: doc.id }
+    }
+  }
+})
+```
+
+### Abort a bulk operation [_abort_a_bulk_operation]
+
+If needed, you can abort a bulk operation at any time. The bulk helper returns a [thenable](https://promisesaplus.com/), which has an `abort` method.
+
+::::{note}
+The abort method stops the execution of the bulk operation, but if you are using a concurrency higher than one, the operations that are already running will not be stopped.
+::::
+
+```js
+const { createReadStream } = require('fs')
+const split = require('split2')
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+const b = client.helpers.bulk({
+  datasource: createReadStream('./dataset.ndjson').pipe(split()),
+  onDocument (doc) {
+    return {
+      index: { _index: 'my-index' }
+    }
+  },
+  onDrop (doc) {
+    b.abort()
+  }
+})
+
+console.log(await b)
+```
+
+### Passing custom options to the Bulk API [_passing_custom_options_to_the_bulk_api]
+
+You can pass any option supported by the [Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to the helper, and the helper uses those options in conjunction with the Bulk API call.
+
+```js
+const result = await client.helpers.bulk({
+  datasource: [...],
+  onDocument (doc) {
+    return {
+      index: { _index: 'my-index' }
+    }
+  },
+  pipeline: 'my-pipeline'
+})
+```
+
+### Usage with an async generator [_usage_with_an_async_generator]
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+async function * generator () {
+  const dataset = [
+    { user: 'jon', age: 23 },
+    { user: 'arya', age: 18 },
+    { user: 'tyrion', age: 39 }
+  ]
+  for (const doc of dataset) {
+    yield doc
+  }
+}
+
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+const result = await client.helpers.bulk({
+  datasource: generator(),
+  onDocument (doc) {
+    return {
+      index: { _index: 'my-index' }
+    }
+  }
+})
+
+console.log(result)
+```
+
+### Modifying a document before operation [_modifying_a_document_before_operation]
+
+Added in `v8.8.2`
+
+If you need to modify documents in your datasource before they are sent to {{es}}, you can return an array in the `onDocument` function rather than an operation object. The first item in the array must be the operation object, and the second item must be the document or partial document object as you’d like it to be sent to {{es}}.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+const result = await client.helpers.bulk({
+  datasource: [...],
+  onDocument (doc) {
+    return [
+      { index: { _index: 'my-index' } },
+      { ...doc, favorite_color: 'mauve' },
+    ]
+  }
+})
+
+console.log(result)
+```
+
+## Multi search helper [multi-search-helper]
+
+Added in `v7.8.0`
+
+If you send search requests at a high rate, this helper might be useful for you. It uses the multi search API under the hood to batch the requests and improve the overall performance of your application. The `result` exposes a `documents` property as well, which allows you to access the hits’ sources directly.
+
+### Usage [_usage_4]
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+const m = client.helpers.msearch()
+
+m.search(
+    { index: 'stackoverflow' },
+    { query: { match: { title: 'javascript' } } }
+  )
+  .then(result => console.log(result.body)) // or result.documents
+  .catch(err => console.error(err))
+```
+
+To create a new instance of the multi search (msearch) helper, access it as shown in the example above. The configuration options are:
+
+`operations`
+: How many search operations should be sent in a single msearch request.
+  *Default:* `5`
+
+    ```js
+    const m = client.helpers.msearch({
+      operations: 10
+    })
+    ```
+
+`flushInterval`
+: How much time (in milliseconds) the helper waits before flushing the operations from the last operation read.
+  *Default:* `500`
+
+    ```js
+    const m = client.helpers.msearch({
+      flushInterval: 500
+    })
+    ```
+
+`concurrency`
+: How many requests are executed at the same time.
+  *Default:* `5`
+
+    ```js
+    const m = client.helpers.msearch({
+      concurrency: 10
+    })
+    ```
+
+`retries`
+: How many times an operation is retried before the request is resolved. An operation is retried only in case of a 429 error.
+  *Default:* Client max retries.
+
+    ```js
+    const m = client.helpers.msearch({
+      retries: 3
+    })
+    ```
+
+`wait`
+: How much time to wait before retrying, in milliseconds.
+  *Default:* `5000`.
+
+    ```js
+    const m = client.helpers.msearch({
+      wait: 3000
+    })
+    ```
+
+### Stopping the msearch helper [_stopping_the_msearch_helper]
+
+If needed, you can stop an msearch processor at any time. The msearch helper returns a [thenable](https://promisesaplus.com/), which has a `stop` method.
+
+If you are creating multiple msearch helper instances and using them for a limited period of time, remember to always use the `stop` method once you have finished using them; otherwise, your application will start leaking memory.
+
+The `stop` method accepts an optional error that will be dispatched to every subsequent search request.
+
+::::{note}
+The stop method stops the execution of the msearch processor, but if you are using a concurrency higher than one, the operations that are already running will not be stopped.
+::::
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+const m = client.helpers.msearch()
+
+m.search(
+    { index: 'stackoverflow' },
+    { query: { match: { title: 'javascript' } } }
+  )
+  .then(result => console.log(result.body))
+  .catch(err => console.error(err))
+
+m.search(
+    { index: 'stackoverflow' },
+    { query: { match: { title: 'ruby' } } }
+  )
+  .then(result => console.log(result.body))
+  .catch(err => console.error(err))
+
+setImmediate(() => m.stop())
+```
+
+## Search helper [search-helper]
+
+Added in `v7.7.0`
+
+A simple wrapper around the search API. Instead of returning the entire `result` object it returns only the search documents source. To improve performance, this helper automatically adds `filter_path=hits.hits._source` to the query string.
+
+```js
+const documents = await client.helpers.search({
+  index: 'stackoverflow',
+  query: {
+    match: {
+      title: 'javascript'
+    }
+  }
+})
+
+for (const doc of documents) {
+  console.log(doc)
+}
+```
+
+## Scroll search helper [scroll-search-helper]
+
+Added in `v7.7.0`
+
+This helper offers a simple and intuitive way to use the scroll search API. Once called, it returns an [async iterator](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/async_function) which can be used in conjunction with a `for await...of` loop. It automatically handles the `429` error and uses the `maxRetries` option of the client.
+
+```js
+const scrollSearch = client.helpers.scrollSearch({
+  index: 'stackoverflow',
+  query: {
+    match: {
+      title: 'javascript'
+    }
+  }
+})
+
+for await (const result of scrollSearch) {
+  console.log(result)
+}
+```
+
+### Clear a scroll search [_clear_a_scroll_search]
+
+If needed, you can clear a scroll search by calling `result.clear()`:
+
+```js
+for await (const result of scrollSearch) {
+  if (condition) {
+    await result.clear()
+  }
+}
+```
+
+### Quickly getting the documents [_quickly_getting_the_documents]
+
+If you only need the documents from the result of a scroll search, you can access them via `result.documents`:
+
+```js
+for await (const result of scrollSearch) {
+  console.log(result.documents)
+}
+```
+
+## Scroll documents helper [scroll-documents-helper]
+
+Added in `v7.7.0`
+
+It works in the same way as the scroll search helper, but it returns only the documents instead. Note that every loop cycle returns a single document, and you can’t use the `clear` method. To improve performance, this helper automatically adds `filter_path=hits.hits._source` to the query string.
+
+```js
+const scrollSearch = client.helpers.scrollDocuments({
+  index: 'stackoverflow',
+  query: {
+    match: {
+      title: 'javascript'
+    }
+  }
+})
+
+for await (const doc of scrollSearch) {
+  console.log(doc)
+}
+```
+
+## ES|QL helper [esql-helper]
+
+ES|QL queries can return their results in [several formats](docs-content://explore-analyze/query-filter/languages/esql-rest.md#esql-rest-format).
+
+### Usage [_usage_5]
+
+#### `toRecords` [_torecords]
+
+Added in `v8.14.0`
+
+The default JSON format returned by ES|QL queries contains arrays of values for each row, with column names and types returned separately:
+
+```json
+{
+  "columns": [
+    { "name": "@timestamp", "type": "date" },
+    { "name": "client_ip", "type": "ip" },
+    { "name": "event_duration", "type": "long" },
+    { "name": "message", "type": "keyword" }
+  ],
+  "values": [
+    [
+      "2023-10-23T12:15:03.360Z",
+      "172.21.2.162",
+      3450233,
+      "Connected to 10.1.0.3"
+    ],
+    [
+      "2023-10-23T12:27:28.948Z",
+      "172.21.2.113",
+      2764889,
+      "Connected to 10.1.0.2"
+    ]
+  ]
+}
+```
+
+In many cases, it’s preferable to operate on an array of objects, one object per row, rather than an array of arrays. The ES|QL `toRecords` helper converts row data into objects.
+
+```js
+await client.helpers
+  .esql({ query: 'FROM sample_data | LIMIT 2' })
+  .toRecords()
+// =>
+// {
+//   "columns": [
+//     { "name": "@timestamp", "type": "date" },
+//     { "name": "client_ip", "type": "ip" },
+//     { "name": "event_duration", "type": "long" },
+//     { "name": "message", "type": "keyword" }
+//   ],
+//   "records": [
+//     {
+//       "@timestamp": "2023-10-23T12:15:03.360Z",
+//       "client_ip": "172.21.2.162",
+//       "event_duration": 3450233,
+//       "message": "Connected to 10.1.0.3"
+//     },
+//     {
+//       "@timestamp": "2023-10-23T12:27:28.948Z",
+//       "client_ip": "172.21.2.113",
+//       "event_duration": 2764889,
+//       "message": "Connected to 10.1.0.2"
+//     },
+//   ]
+// }
+```
+
+In TypeScript, you can declare the type that `toRecords` returns:
+
+```ts
+type EventLog = {
+  '@timestamp': string,
+  client_ip: string,
+  event_duration: number,
+  message: string,
+}
+
+const result = await client.helpers
+  .esql({ query: 'FROM sample_data | LIMIT 2' })
+  .toRecords<EventLog>()
+```
+
+#### `toArrowReader` [_toarrowreader]
+
+Added in `v8.16.0`
+
+ES|QL can return results in multiple binary formats, including [Apache Arrow](https://arrow.apache.org/)'s streaming format. Because it is a very efficient format to read, it can be valuable for performing high-performance in-memory analytics. And, because the response is streamed as batches of records, it can be used to produce aggregations and other calculations on larger-than-memory data sets.
+
+`toArrowReader` returns an [`AsyncRecordBatchStreamReader`](https://github.com/apache/arrow/blob/520ae44272d491bbb52eb3c9b84864ed7088f11a/js/src/ipc/reader.ts#L216).
+
+```ts
+const reader = await client.helpers
+  .esql({ query: 'FROM sample_data' })
+  .toArrowReader()
+
+// print each record as JSON
+for await (const recordBatch of reader) {
+  for (const record of recordBatch) {
+    console.log(record.toJSON())
+  }
+}
+```
+
+#### `toArrowTable` [_toarrowtable]
+
+Added in `v8.16.0`
+
+If you would like to pull the entire data set in Arrow format but without streaming, you can use the `toArrowTable` helper to get a [Table](https://arrow.apache.org/docs/js/classes/Arrow_dom.Table.md) back instead.
+
+```ts
+const table = await client.helpers
+  .esql({ query: 'FROM sample_data' })
+  .toArrowTable()
+
+console.log(table.toArray())
+```
diff --git a/docs/reference/client-testing.md b/docs/reference/client-testing.md
new file mode 100644
index 000000000..ffeb34d5e
--- /dev/null
+++ b/docs/reference/client-testing.md
@@ -0,0 +1,121 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-testing.html
+---
+
+# Testing [client-testing]
+
+Testing is one of the most important parts of developing an application. The client is very flexible when it comes to testing and is compatible with most testing frameworks (such as [`ava`](https://www.npmjs.com/package/ava), which is used in the examples below).
+
+If you are using this client, you are most likely working with {{es}}, and one of the first issues you face is how to test your application. A perfectly valid solution is to use the real {{es}} instance for testing your application, but you would be doing an integration test, while you want a unit test. There are many ways to solve this problem: you could create the database with Docker, or use an in-memory compatible one, but if you are writing unit tests that can be easily parallelized, this becomes quite uncomfortable. A different way of improving your testing experience while doing unit tests is to use a mock.
+
+The client is designed to be easy to extend and adapt to your needs. Thanks to its internal architecture, it allows you to change some specific components while keeping the rest of it working as usual. Each {{es}} official client is composed of the following components:
+
+* `API layer`: every {{es}} API that you can call.
+* `Transport`: a component that takes care of preparing a request before sending it and handling all the retry and sniffing strategies.
+* `ConnectionPool`: {{es}} is a cluster and might have multiple nodes, the `ConnectionPool` takes care of them.
+* `Serializer`: A class with all the serialization strategies, from the basic JSON to the newline-delimited JSON.
+* `Connection`: The actual HTTP library.
+
+The best way to mock {{es}} with the official clients is to replace the `Connection` component, since it has very few responsibilities and it does not interact with other internal components other than getting requests and returning responses.
+
+
+## `@elastic/elasticsearch-mock` [_elasticelasticsearch_mock]
+
+Writing a mock for each test by hand can be annoying and error-prone, so we have built a simple yet powerful mocking library specifically designed for this client, which you can install with the following command:
+
+```sh
+npm install @elastic/elasticsearch-mock --save-dev
+```
+
+With this library you can create custom mocks for any request you can send to {{es}}. It offers a simple and intuitive API and it mocks only the HTTP layer, leaving the rest of the client working as usual.
+
+Before showing all of its features, and what you can do with it, let’s see an example:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const Mock = require('@elastic/elasticsearch-mock')
+
+const mock = new Mock()
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' },
+  Connection: mock.getConnection()
+})
+
+mock.add({
+  method: 'GET',
+  path: '/'
+}, () => {
+  return { status: 'ok' }
+})
+
+client.info().then(console.log, console.log)
+```
+
+As you can see, it works closely with the client itself: once you have created a new instance of the mock library, you just need to call the `mock.getConnection()` method and pass its result to the `Connection` option of the client. From now on, every request is handled by the mock library, and the HTTP layer will never be touched. As a result, your tests are significantly faster and you can easily parallelize them!
+
+The library allows you to write both “strict” and “loose” mocks, which means that you can write a mock that handles a very specific request or be looser and handle a group of requests; let’s see this in action:
+
+```js
+mock.add({
+  method: 'POST',
+  path: '/indexName/_search'
+}, () => {
+  return {
+    hits: {
+      total: { value: 1, relation: 'eq' },
+      hits: [{ _source: { baz: 'faz' } }]
+    }
+  }
+})
+
+mock.add({
+  method: 'POST',
+  path: '/indexName/_search',
+  body: { query: { match: { foo: 'bar' } } }
+}, () => {
+  return {
+    hits: {
+      total: { value: 0, relation: 'eq' },
+      hits: []
+    }
+  }
+})
+```
+
+In the example above, every search request gets the first response, while every search request that uses the query described in the second mock gets the second response.
+
+You can also specify dynamic paths:
+
+```js
+mock.add({
+  method: 'GET',
+  path: '/:index/_count'
+}, () => {
+  return { count: 42 }
+})
+
+client.count({ index: 'foo' }).then(console.log, console.log) // => { count: 42 }
+client.count({ index: 'bar' }).then(console.log, console.log) // => { count: 42 }
+```
+
+Wildcards are supported as well.
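+
+For instance, a catch-all mock might look like this sketch (assuming `'*'` is used as the wildcard path):
+
+```js
+// a minimal sketch: match any GET request, whatever the path
+mock.add({
+  method: 'GET',
+  path: '*'
+}, () => {
+  return { status: 'ok' }
+})
+```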
+
+Another very interesting use case is the ability to create a test that randomly fails to see how your code reacts to failures:
+
+```js
+// `errors` is exported by the client package
+const { errors } = require('@elastic/elasticsearch')
+
+mock.add({
+  method: 'GET',
+  path: '/:index/_count'
+}, () => {
+  if (Math.random() > 0.8) {
+    return new errors.ResponseError({ body: {}, statusCode: 500 })
+  } else {
+    return { count: 42 }
+  }
+})
+```
+
+We have seen how simple mocking {{es}} and testing your application can be; you can find many more features and examples in the [module documentation](https://github.com/elastic/elasticsearch-js-mock).
+
diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md
new file mode 100644
index 000000000..744743ef6
--- /dev/null
+++ b/docs/reference/configuration.md
@@ -0,0 +1,14 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html
+---
+
+# Configuration [client-configuration]
+
+The client is designed to be easily configured for your needs. In the following section, you can see the possible options that you can use to configure it.
+
+- [Basic configuration](/reference/basic-config.md)
+- [Advanced configuration](/reference/advanced-config.md)
+- [Timeout best practices](/reference/timeout-best-practices.md)
+- [Creating a child client](/reference/child.md)
+- [Testing](/reference/client-testing.md)
\ No newline at end of file
diff --git a/docs/reference/connecting.md b/docs/reference/connecting.md
new file mode 100644
index 000000000..966fe687c
--- /dev/null
+++ b/docs/reference/connecting.md
@@ -0,0 +1,562 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-connecting.html
+---
+
+# Connecting [client-connecting]
+
+This page contains the information you need to connect and use the Client with {{es}}.
+
+## Authentication [authentication]
+
+This document contains code snippets to show you how to connect to various {{es}} providers.
+
+### Elastic Cloud [auth-ec]
+
+If you are using [Elastic Cloud](https://www.elastic.co/cloud), the client offers an easy way to connect to it via the `cloud` option. You must pass the Cloud ID that you can find in the cloud console, then your username and password inside the `auth` option.
+
+::::{note}
+When connecting to Elastic Cloud, the client will automatically enable both request and response compression by default, since it yields significant throughput improvements. Moreover, the client will also set the tls option `secureProtocol` to `TLSv1_2_method` unless specified otherwise. You can still override this option by configuring it.
+::::
+
+::::{important}
+Do not enable sniffing when using Elastic Cloud: since the nodes are behind a load balancer, Elastic Cloud takes care of everything for you. Take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to learn more.
+::::
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: {
+    id: ''
+  },
+  auth: {
+    username: 'elastic',
+    password: 'changeme'
+  }
+})
+```
+
+## Connecting to an Elastic Cloud Serverless instance [connect-serverless]
+
+The Node.js client is built to support connecting to [Elastic Cloud Serverless](https://www.elastic.co/guide/en/serverless/current/intro.html). By setting the `serverMode` option to `"serverless"`, several default options will be modified to better suit the serverless environment.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: {
+    id: ''
+  },
+  auth: {
+    username: 'elastic',
+    password: 'changeme'
+  },
+  serverMode: 'serverless'
+})
+```
+
+## Connecting to a self-managed cluster [connect-self-managed-new]
+
+By default {{es}} will start with security features like authentication and TLS enabled. To connect to the {{es}} cluster you’ll need to configure the Node.js {{es}} client to use HTTPS with the generated CA certificate in order to make requests successfully.
+
+If you’re just getting started with {{es}}, we recommend reading the documentation on [configuring](docs-content://deploy-manage/deploy/self-managed/configure-elasticsearch.md) and [starting {{es}}](docs-content://deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md) to ensure your cluster is running as expected.
+
+When you start {{es}} for the first time you’ll see a distinct block like the one below in the output from {{es}} (you may have to scroll up if it’s been a while):
+
+```sh
+-> Elasticsearch security features have been automatically configured!
+-> Authentication is enabled and cluster connections are encrypted.
+
+-> Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`):
+  lhQpLELkjkrawaBoaz0Q
+
+-> HTTP CA certificate SHA-256 fingerprint:
+  a52dd93511e8c6045e21f16654b77c9ee0f34aea26d9f40320b531c474676228
+...
+```
+
+Depending on the circumstances, there are two options for verifying the HTTPS connection: verifying with the CA certificate itself or via the HTTP CA certificate fingerprint.
+
+::::{warning}
+If you have set [the `agent` option](/reference/basic-config.md#agent-config) on your client instance to a function and are using `UndiciConnection`—the default `Connection` value starting in 8.0—all `caFingerprint` and `tls` options will be ignored. It is your responsibility to ensure that your custom agent will properly verify HTTPS connections.
+::::
+
+### TLS configuration [auth-tls]
+
+The generated root CA certificate can be found in the `certs` directory in your {{es}} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you’re running {{es}} in Docker there is [additional documentation for retrieving the CA certificate](docs-content://deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md).
+
+Without any additional configuration you can specify `https://` node urls, and the certificates used to sign these requests will be verified. To turn off certificate verification, you must specify a `tls` object in the top level config and set `rejectUnauthorized: false`. The default `tls` values are the same ones that Node.js’s [`tls.connect()`](https://nodejs.org/api/tls.html#tls_tls_connect_options_callback) uses.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'https://localhost:9200/',
+  auth: {
+    username: 'elastic',
+    password: 'changeme'
+  },
+  tls: {
+    ca: fs.readFileSync('./http_ca.crt'),
+    rejectUnauthorized: false
+  }
+})
+```
+
+### CA fingerprint [auth-ca-fingerprint]
+
+You can configure the client to only trust certificates that are signed by a specific CA certificate (CA certificate pinning) by providing a `caFingerprint` option. This will verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied value. You must configure a SHA256 digest.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'https://example.com/',
+  auth: { ... },
+  // the fingerprint (SHA256) of the CA certificate that is used to sign
+  // the certificate that the Elasticsearch node presents for TLS.
+  caFingerprint: '20:0D:CA:FA:76:...',
+  tls: {
+    // might be required if it's a self-signed certificate
+    rejectUnauthorized: false
+  }
+})
+```
+
+The certificate fingerprint can be calculated using `openssl x509` with the certificate file:
+
+```sh
+openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt
+```
+
+If you don’t have access to the generated CA file from {{es}}, you can use the following script to output the root CA fingerprint of the {{es}} instance with `openssl s_client`:
+
+```sh
+# Replace the values of 'localhost' and '9200' with the
+# corresponding host and port values for the cluster.
+openssl s_client -connect localhost:9200 -servername localhost -showcerts </dev/null 2>/dev/null \
+  | openssl x509 -fingerprint -sha256 -noout -in /dev/stdin
+```
+
+The output of `openssl x509` will look something like this:
+
+```sh
+SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28
+```
+
+## Connecting without security enabled [connect-no-security]
+
+::::{warning}
+Running {{es}} without security enabled is not recommended.
+::::
+
+If your cluster is configured with [security explicitly disabled](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md) then you can connect via HTTP:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'http://example.com/'
+})
+```
+
+## Authentication strategies [auth-strategies]
+
+Below you can find all the supported authentication strategies.
+
+### ApiKey authentication [auth-apikey]
+
+You can use the [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication by passing the `apiKey` parameter via the `auth` option. The `apiKey` parameter can be either a base64 encoded string or an object with the values that you can obtain from the [create api key endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key).
+
+::::{note}
+If you provide both basic authentication credentials and the ApiKey configuration, the ApiKey takes precedence.
+::::
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'https://localhost:9200/',
+  auth: {
+    apiKey: 'base64EncodedKey'
+  }
+})
+```
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'https://localhost:9200/',
+  auth: {
+    apiKey: {
+      id: 'foo',
+      api_key: 'bar'
+    }
+  }
+})
+```
+
+### Bearer authentication [auth-bearer]
+
+You can provide your credentials by passing the `bearer` token parameter via the `auth` option. This is useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'https://localhost:9200/',
+  auth: {
+    bearer: 'token'
+  }
+})
+```
+
+### Basic authentication [auth-basic]
+
+You can provide your credentials by passing the `username` and `password` parameters via the `auth` option.
+
+::::{note}
+If you provide both basic authentication credentials and the ApiKey configuration, the ApiKey will take precedence.
+::::
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'https://localhost:9200/',
+  auth: {
+    username: 'elastic',
+    password: 'changeme'
+  }
+})
+```
+
+Otherwise, you can provide your credentials in the node(s) URL.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'https://username:password@localhost:9200/'
+})
+```
+
+## Usage [client-usage]
+
+Using the client is straightforward: it supports all the public APIs of {{es}}, and every method exposes the same signature.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+const result = await client.search({
+  index: 'my-index',
+  query: {
+    match: { hello: 'world' }
+  }
+})
+```
+
+The returned value of every API call is the response body from {{es}}. If you need to access additional metadata, such as the status code or headers, you must specify `meta: true` in the request options:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+const result = await client.search({
+  index: 'my-index',
+  query: {
+    match: { hello: 'world' }
+  }
+}, { meta: true })
+```
+
+In this case, the result will be:
+
+```ts
+{
+  body: object | boolean
+  statusCode: number
+  headers: object
+  warnings: string[]
+  meta: object
+}
+```
+
+::::{note}
+The body is a boolean value when you use `HEAD` APIs.
+::::
+
+### Aborting a request [_aborting_a_request]
+
+If needed, you can abort a running request by using the `AbortController` standard.
+
+::::{warning}
+If you abort a request, the request will fail with a `RequestAbortedError`.
+::::
+
+```js
+const AbortController = require('node-abort-controller')
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+const abortController = new AbortController()
+setImmediate(() => abortController.abort())
+
+const result = await client.search({
+  index: 'my-index',
+  query: {
+    match: { hello: 'world' }
+  }
+}, { signal: abortController.signal })
+```
+
+### Request specific options [_request_specific_options]
+
+If needed, you can pass request-specific options in a second object:
+
+```js
+const result = await client.search({
+  index: 'my-index',
+  query: {
+    match: { hello: 'world' }
+  }
+}, {
+  ignore: [404],
+  maxRetries: 3
+})
+```
+
+The supported request specific options are:
+
+| Option | Description |
+| --- | ----------- |
+| `ignore` | `number[]` - HTTP status codes which should not be considered errors for this request.<br>
*Default:* `null` |
+| `requestTimeout` | `number` or `string` - Max request timeout for the request in milliseconds. This overrides the client default, which is to not time out at all. See [{{es}} best practices for HTTP clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more info.<br>
_Default:_ No timeout |
+| `retryOnTimeout` | `boolean` - Retry requests that have timed out.<br>
*Default:* `false` |
+| `maxRetries` | `number` - Max number of retries for the request; it overrides the client default.<br>
*Default:* `3` | +| `compression` | `string` or `boolean` - Enables body compression for the request.
*Options:* `false`, `'gzip'`
*Default:* `false` | +| `asStream` | `boolean` - Instead of getting the parsed body back, you get the raw Node.js stream of data.
*Default:* `false` | +| `headers` | `object` - Custom headers for the request.
*Default:* `null` | +| `querystring` | `object` - Custom querystring for the request.
*Default:* `null` | +| `id` | `any` - Custom request ID. *(overrides the top level request id generator)*
*Default:* `null` |
+| `context` | `any` - Custom object per request. *(you can use it to pass data to the client’s events)*<br>
*Default:* `null` | +| `opaqueId` | `string` - Set the `X-Opaque-Id` HTTP header. See [X-Opaque-Id HTTP header](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#x-opaque-id)
*Default:* `null` |
+| `maxResponseSize` | `number` - When configured, it verifies that the uncompressed response size is lower than the configured number; if it’s higher, it will abort the request. It cannot be higher than `buffer.constants.MAX_STRING_LENGTH`.<br>
*Default:* `null` |
+| `maxCompressedResponseSize` | `number` - When configured, it verifies that the compressed response size is lower than the configured number; if it’s higher, it will abort the request. It cannot be higher than `buffer.constants.MAX_LENGTH`.<br>
*Default:* `null` | +| `signal` | `AbortSignal` - The AbortSignal instance to allow request abortion.
*Default:* `null` | +| `meta` | `boolean` - Rather than returning the body, return an object containing `body`, `statusCode`, `headers` and `meta` keys
*Default:* `false` |
+| `redaction` | `object` - Options for redacting potentially sensitive data from error metadata. See [Redaction of potentially sensitive data](/reference/advanced-config.md#redaction). |
+| `retryBackoff` | `(min: number, max: number, attempt: number) => number;` - A function that calculates how long to sleep, in seconds, before the next request retry.<br>
_Default:_ A built-in function that uses exponential backoff with jitter. |
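+
+Of these options, `asStream` changes the shape of what you get back, so a minimal sketch may help: instead of the parsed body you receive a raw Node.js stream (the index name here is a placeholder):
+
+```js
+const result = await client.search({
+  index: 'my-index',
+  query: { match_all: {} }
+}, {
+  asStream: true
+})
+
+// `result` is a raw stream of the response body; collect it before parsing
+let payload = ''
+result.setEncoding('utf8')
+for await (const chunk of result) {
+  payload += chunk
+}
+console.log(JSON.parse(payload))
+```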
+
+## Using the Client in a Function-as-a-Service Environment [client-faas-env]
+
+This section illustrates the best practices for leveraging the {{es}} client in a Function-as-a-Service (FaaS) environment. The most influential optimization is to initialize the client outside of the function, in the global scope. This practice not only improves performance but also enables background functionality such as [sniffing](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). The following examples provide a skeleton for the best practices.
+
+### GCP Cloud Functions [_gcp_cloud_functions]
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  // client initialisation
+})
+
+exports.testFunction = async function (req, res) {
+  // use the client
+}
+```
+
+### AWS Lambda [_aws_lambda]
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  // client initialisation
+})
+
+exports.handler = async function (event, context) {
+  // use the client
+}
+```
+
+### Azure Functions [_azure_functions]
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  // client initialisation
+})
+
+module.exports = async function (context, req) {
+  // use the client
+}
+```
+
+Resources used to assess these recommendations:
+
+* [GCP Cloud Functions: Tips & Tricks](https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations)
+* [Best practices for working with AWS Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html)
+* [Azure Functions Python developer guide](https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables)
+* [AWS Lambda: Comparing the effect of global scope](https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.html)
+
+## Connecting through a proxy [client-connect-proxy]
+
+Added in `v7.10.0`
+
+If you need to pass through an http(s) proxy for connecting to {{es}}, the client offers out-of-the-box configuration to help you with it. Under the hood, it uses the [`hpagent`](https://github.com/delvedor/hpagent) module.
+
+::::{important}
+In versions 8.0+ of the client, the default `Connection` type is set to `UndiciConnection`, which does not support proxy configurations. To use a proxy, you will need to use the `HttpConnection` class from `@elastic/transport` instead.
+::::
+
+```js
+import { Client } from '@elastic/elasticsearch'
+import { HttpConnection } from '@elastic/transport'
+
+const client = new Client({
+  node: 'http://localhost:9200/',
+  proxy: 'http://localhost:8080/',
+  Connection: HttpConnection,
+})
+```
+
+Basic authentication is supported as well:
+
+```js
+const client = new Client({
+  node: 'http://localhost:9200/',
+  proxy: 'http://user:pwd@localhost:8080/',
+  Connection: HttpConnection,
+})
+```
+
+If you are connecting through a non-http(s) proxy, such as a `socks5` or `pac`, you can use the `agent` option to configure it.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const { HttpConnection } = require('@elastic/transport')
+const SocksProxyAgent = require('socks-proxy-agent')
+
+const client = new Client({
+  node: 'http://localhost:9200/',
+  agent () {
+    return new SocksProxyAgent('socks://127.0.0.1:1080')
+  },
+  Connection: HttpConnection,
+})
+```
+
+## Error handling [client-error-handling]
+
+The client exposes a variety of error objects that you can use to enhance your error handling. You can find all the error objects inside the `errors` key in the client.
+
+```js
+const { errors } = require('@elastic/elasticsearch')
+console.log(errors)
+```
+
+You can find the errors exported by the client in the table below.
+
+| **Error** | **Description** | **Properties** |
+| --- | --- | --- |
+| `ElasticsearchClientError` | Every error inherits from this class; it is the basic error generated by the client. | * `name` - `string`<br>
* `message` - `string`
| +| `TimeoutError` | Generated when a request exceeds the `requestTimeout` option. | * `name` - `string`
* `message` - `string`
* `meta` - `object`, contains all the information about the request
| `ConnectionError` | Generated when an error occurs during the request; it can be a connection error or a malformed stream of data. | * `name` - `string`<br>
* `message` - `string`
* `meta` - `object`, contains all the information about the request
| +| `RequestAbortedError` | Generated if the user calls the `request.abort()` method. | * `name` - `string`
* `message` - `string`
* `meta` - `object`, contains all the information about the request
| +| `NoLivingConnectionsError` | Given the configuration, the ConnectionPool was not able to find a usable Connection for this request. | * `name` - `string`
* `message` - `string`
* `meta` - `object`, contains all the information about the request
| +| `SerializationError` | Generated if the serialization fails. | * `name` - `string`
* `message` - `string`
* `data` - `object`, the object to serialize
| +| `DeserializationError` | Generated if the deserialization fails. | * `name` - `string`
* `message` - `string`
* `data` - `string`, the string to deserialize
| +| `ConfigurationError` | Generated if there is a malformed configuration or parameter. | * `name` - `string`
* `message` - `string`
| `ResponseError` | Generated in case of a `4xx` or `5xx` response. | * `name` - `string`<br>
* `message` - `string`
* `meta` - `object`, contains all the information about the request
* `body` - `object`, the response body
* `statusCode` - `number`, the response status code<br>
* `headers` - `object`, the response headers<br>
|
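+
+As a practical sketch, you can branch on these classes at the call site with `instanceof`; the index name and query below are placeholders:
+
+```js
+const { Client, errors } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+try {
+  await client.search({ index: 'my-index', query: { match_all: {} } })
+} catch (err) {
+  if (err instanceof errors.ResponseError) {
+    // Elasticsearch replied with a 4xx or 5xx response
+    console.error(err.statusCode, err.body)
+  } else if (err instanceof errors.TimeoutError) {
+    // the request exceeded the requestTimeout option
+    console.error('request timed out', err.meta)
+  } else {
+    throw err
+  }
+}
+```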
+
+## Keep-alive connections [keep-alive]
+
+By default, the client uses persistent, keep-alive connections to reduce the overhead of creating a new HTTP connection for each {{es}} request. If you are using the default `UndiciConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 10 minutes. If you are using the legacy `HttpConnection` connection class, it maintains a pool of 256 connections with a keep-alive of 1 minute.
+
+If you need to disable keep-alive connections, you can override the HTTP agent with your preferred [HTTP agent options](/reference/basic-config.md#agent-config):
+
+```js
+const client = new Client({
+  node: 'http://localhost:9200/',
+  // the function takes as parameter the option
+  // object passed to the Connection constructor
+  agent: (opts) => new CustomAgent()
+})
+```
+
+Or you can disable the HTTP agent entirely:
+
+```js
+const client = new Client({
+  node: 'http://localhost:9200/',
+  // Disable agent and keep-alive
+  agent: false
+})
+```
+
+## Managing open connection limits [limit-open-connections]
+
+Starting in client 9.0, when using `@elastic/transport` 9.2.0 or later, you can provide a custom `agent` function to share a singleton [Undici `Agent`](https://undici.nodejs.org/#/docs/api/Agent.md) instance that can enforce client-wide connection limits.
+
+```typescript
+import { Client } from '@elastic/elasticsearch'
+import { Agent } from 'undici'
+
+// `maxOrigins * connections` (50 in this case) is the total connection limit
+const maxSocketAgent = new Agent({
+  keepAliveTimeout: 1000,
+  maxOrigins: 5,
+  connections: 10
+})
+
+const client = new Client({
+  node: '...',
+  auth: { ... },
+  agent: () => maxSocketAgent
+})
+```
+
+If using the legacy `HttpConnection`, you can use an [`Agent`](https://nodejs.org/api/https.html#class-httpsagent) singleton that enforces `maxTotalSockets`:
+
+```typescript
+import { Client } from '@elastic/elasticsearch'
+import { Agent } from 'node:http'
+import { HttpConnection } from '@elastic/transport'
+
+const maxSocketAgent = new Agent({
+  keepAlive: true,
+  keepAliveMsecs: 1000,
+  maxTotalSockets: 50
+})
+
+const client = new Client({
+  node: '...',
+  auth: { ... },
+  Connection: HttpConnection,
+  agent: () => maxSocketAgent
+})
+```
+
+## Closing a client’s connections [close-connections]
+
+If you would like to close all open connections being managed by an instance of the client, use the `close()` function:
+
+```js
+const client = new Client({
+  node: 'http://localhost:9200/'
+});
+client.close();
+```
+
+## Automatic product check [product-check]
+
+Since v7.14.0, the client performs a required product check before the first call. This pre-flight product check allows the client to establish the version of {{es}} that it is communicating with. The product check requires one additional HTTP request to be sent to the server as part of the request pipeline before the main API call is sent. In most cases, this will succeed during the very first API call that the client sends. Once the product check completes, no further product check HTTP requests are sent for subsequent API calls.
diff --git a/docs/reference/examples.md b/docs/reference/examples.md
new file mode 100644
index 000000000..d307341d1
--- /dev/null
+++ b/docs/reference/examples.md
@@ -0,0 +1,38 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html
+---
+
+# Examples [examples]
+
+Below you can find some examples of how to use the client.
+
+* Use of the [asStream](/reference/as_stream_examples.md) parameter;
+* Executing a [bulk](/reference/bulk_examples.md) request;
+* Executing an [exists](/reference/exists_examples.md) request;
+* Executing a [get](/reference/get_examples.md) request;
+* Executing a [sql.query](/reference/sql_query_examples.md) request;
+* Executing an [update](/reference/update_examples.md) request;
+* Executing an [update by query](/reference/update_by_query_examples.md) request;
+* Executing a [reindex](/reference/reindex_examples.md) request;
+* Use of the [ignore](/reference/ignore_examples.md) parameter;
+* Executing a [msearch](/reference/msearch_examples.md) request;
+* How do I [scroll](/reference/scroll_examples.md)?
+* Executing a [search](/reference/search_examples.md) request;
+* I need [suggestions](/reference/suggest_examples.md);
+* How to use the [transport.request](/reference/transport_request_examples.md) method;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/reference/exists_examples.md b/docs/reference/exists_examples.md
new file mode 100644
index 000000000..c5a691f64
--- /dev/null
+++ b/docs/reference/exists_examples.md
@@ -0,0 +1,44 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/exists_examples.html
+---
+
+# Exists [exists_examples]
+
+Check that the document `/game-of-thrones/1` exists.
+
+::::{note}
+Since this API uses the `HEAD` method, the body value will be boolean.
+::::
+
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  await client.index({
+    index: 'game-of-thrones',
+    id: '1',
+    document: {
+      character: 'Ned Stark',
+      quote: 'Winter is coming.'
+    }
+  })
+
+  const exists = await client.exists({
+    index: 'game-of-thrones',
+    id: 1
+  })
+
+  console.log(exists) // true
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/reference/get_examples.md b/docs/reference/get_examples.md
new file mode 100644
index 000000000..6b545b072
--- /dev/null
+++ b/docs/reference/get_examples.md
@@ -0,0 +1,39 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/get_examples.html
+---
+
+# Get [get_examples]
+
+The get API allows you to get a typed JSON document from the index based on its id. The following example gets a JSON document from an index called `game-of-thrones`, under a type called `_doc`, with id `'1'`.
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  await client.index({
+    index: 'game-of-thrones',
+    id: '1',
+    document: {
+      character: 'Ned Stark',
+      quote: 'Winter is coming.'
+    }
+  })
+
+  const document = await client.get({
+    index: 'game-of-thrones',
+    id: '1'
+  })
+
+  console.log(document)
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/reference/getting-started.md b/docs/reference/getting-started.md
new file mode 100644
index 000000000..b0de914b8
--- /dev/null
+++ b/docs/reference/getting-started.md
@@ -0,0 +1,150 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/getting-started-js.html
+  - https://www.elastic.co/guide/en/serverless/current/elasticsearch-nodejs-client-getting-started.html
+---
+
+# Getting started [getting-started-js]
+
+This page guides you through the installation process of the Node.js client, shows you how to instantiate the client, and demonstrates how to perform basic {{es}} operations with it.
+
+
+### Requirements [_requirements]
+
+* [Node.js](https://nodejs.org/) version 18.x or newer
+* [`npm`](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm), usually bundled with Node.js
+
+
+### Installation [_installation]
+
+To install the latest version of the client, run the following command:
+
+```shell
+npm install @elastic/elasticsearch
+```
+
+Refer to the [*Installation*](/reference/installation.md) page to learn more.
+
+
+### Connecting [_connecting]
+
+You can connect to Elastic Cloud using an API key and the {{es}} endpoint.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  node: 'https://...', // Elasticsearch endpoint
+  auth: {
+    apiKey: { // API key ID and secret
+      id: 'foo',
+      api_key: 'bar',
+    }
+  }
+})
+```
+
+Your {{es}} endpoint can be found on the **My deployment** page of your deployment:
+
+![Finding {{es}} endpoint](images/es-endpoint.jpg)
+
+You can generate an API key on the **Management** page under Security.
+
+![Create API key](images/create-api-key.png)
+
+For other connection options, refer to the [*Connecting*](/reference/connecting.md) section.
+
+
+### Operations [_operations]
+
+Time to use {{es}}! This section walks you through the basic, and most important, operations of {{es}}.
+ + +#### Creating an index [_creating_an_index] + +This is how you create the `my_index` index: + +```js +await client.indices.create({ index: 'my_index' }) +``` + + +#### Indexing documents [_indexing_documents] + +This is a simple way of indexing a document: + +```js +await client.index({ + index: 'my_index', + id: 'my_document_id', + document: { + foo: 'foo', + bar: 'bar', + }, +}) +``` + + +#### Getting documents [_getting_documents] + +You can get documents by using the following code: + +```js +await client.get({ + index: 'my_index', + id: 'my_document_id', +}) +``` + + +#### Searching documents [_searching_documents] + +This is how you can create a single match query with the client: + +```js +await client.search({ + query: { + match: { + foo: 'foo' + } + } +}) +``` + + +#### Updating documents [_updating_documents] + +This is how you can update a document, for example to add a new field: + +```js +await client.update({ + index: 'my_index', + id: 'my_document_id', + doc: { + foo: 'bar', + new_field: 'new value' + } +}) +``` + + +#### Deleting documents [_deleting_documents] + +```js +await client.delete({ + index: 'my_index', + id: 'my_document_id', +}) +``` + + +#### Deleting an index [_deleting_an_index] + +```js +await client.indices.delete({ index: 'my_index' }) +``` + + +## Further reading [_further_reading] + +* Use [*Client helpers*](/reference/client-helpers.md) for a more comfortable experience with the APIs. +* For an elaborate example of how to ingest data into Elastic Cloud, refer to [this page](docs-content://manage-data/ingest/ingesting-data-from-applications/ingest-data-with-nodejs-on-elasticsearch-service.md). diff --git a/docs/reference/ignore_examples.md b/docs/reference/ignore_examples.md new file mode 100644 index 000000000..8414d007c --- /dev/null +++ b/docs/reference/ignore_examples.md @@ -0,0 +1,69 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/ignore_examples.html +--- + +# Ignore [ignore_examples] + +HTTP status codes which should not be considered errors for this request. + +```js +'use strict' + +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) + +async function run () { + const bulkResponse = await client.bulk({ + refresh: true, + operations: [ + // operation to perform + { index: { _index: 'game-of-thrones' } }, + // the document to index + { + character: 'Ned Stark', + quote: 'Winter is coming.' + }, + + { index: { _index: 'game-of-thrones' } }, + { + character: 'Daenerys Targaryen', + quote: 'I am the blood of the dragon.' + }, + + { index: { _index: 'game-of-thrones' } }, + { + character: 'Tyrion Lannister', + quote: 'A mind needs books like a sword needs a whetstone.' + } + ] + }) + + if (bulkResponse.errors) { + console.log(bulkResponse) + process.exit(1) + } + + // Let's search! 
+  const result = await client.search({
+    index: 'game-of-thrones',
+    query: {
+      match: {
+        quote: 'fire'
+      }
+    }
+  }, {
+    ignore: [404]
+  })
+
+  console.log(result) // ResponseError
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/reference/images/create-api-key.png b/docs/reference/images/create-api-key.png
new file mode 100644
index 000000000..d75c23030
Binary files /dev/null and b/docs/reference/images/create-api-key.png differ
diff --git a/docs/reference/images/es-endpoint.jpg b/docs/reference/images/es-endpoint.jpg
new file mode 100644
index 000000000..6da2e7565
Binary files /dev/null and b/docs/reference/images/es-endpoint.jpg differ
diff --git a/docs/reference/index.md b/docs/reference/index.md
new file mode 100644
index 000000000..cb62bde97
--- /dev/null
+++ b/docs/reference/index.md
@@ -0,0 +1,74 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html
+---
+
+# JavaScript [introduction]
+
+This is the official Node.js client for {{es}}. This page gives a quick overview of the features of the client.
+
+## Features [_features]
+
+* One-to-one mapping with the REST API.
+* Generalized, pluggable architecture.
+* Configurable, automatic discovery of cluster nodes.
+* Persistent, Keep-Alive connections.
+* Load balancing across all available nodes.
+* Child client support.
+* TypeScript support out of the box.
+
+### Install multiple versions [_install_multiple_versions]
+
+If you are using multiple versions of {{es}}, you need to use multiple versions of the client as well. In the past, installing multiple versions of the same package was not possible, but with `npm v6.9`, you can do it via aliasing.
+
+To install a different version of the client, run the following command:
+
+```sh
+npm install <alias>@npm:@elastic/elasticsearch@<version>
+```
+
+For example, if you need to install `7.x` and `6.x`, run the following commands:
+
+```sh
+npm install es6@npm:@elastic/elasticsearch@6
+npm install es7@npm:@elastic/elasticsearch@7
+```
+
+Your `package.json` will look similar to the following example:
+
+```json
+"dependencies": {
+  "es6": "npm:@elastic/elasticsearch@^6.7.0",
+  "es7": "npm:@elastic/elasticsearch@^7.0.0"
+}
+```
+
+Require the packages from your code by using the alias you have defined.
+
+```js
+const { Client: Client6 } = require('es6')
+const { Client: Client7 } = require('es7')
+
+const client6 = new Client6({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+const client7 = new Client7({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+client6.info().then(console.log, console.log)
+client7.info().then(console.log, console.log)
+```
+
+Finally, if you want to install the client for the next version of {{es}} (the one that lives in the {{es}} main branch), use the following command:
+
+```sh
+npm install esmain@github:elastic/elasticsearch-js
+```
+
+::::{warning}
+This command installs the main branch of the client which is not considered stable.
+::::
diff --git a/docs/reference/installation.md b/docs/reference/installation.md
new file mode 100644
index 000000000..07387d1b1
--- /dev/null
+++ b/docs/reference/installation.md
@@ -0,0 +1,61 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/installation.html
+---
+
+# Installation [installation]
+
+This page guides you through the installation process of the client.
+
+To install the latest version of the client, run the following command:
+
+```sh
+npm install @elastic/elasticsearch
+```
+
+To install a specific major version of the client, run the following command:
+
+```sh
+npm install @elastic/elasticsearch@<major>
+```
+
+To learn more about the supported major versions, please refer to the [Compatibility matrix](#js-compatibility-matrix).
+
+## Node.js support [nodejs-support]
+
+::::{note}
+The minimum supported version of Node.js is `v18`.
+::::
+
+The client versioning follows the {{stack}} versioning; this means that major, minor, and patch releases are done following a precise schedule that often does not coincide with the [Node.js release](https://nodejs.org/en/about/releases/) times.
+
+To avoid supporting insecure and unsupported versions of Node.js, the client **will drop support for EOL versions of Node.js between minor releases**. Typically, as soon as a Node.js version goes into EOL, the client will continue to support that version for at least another minor release. If you are using the client with a version of Node.js that will be unsupported soon, you will see a warning in your logs (the client will start logging the warning two minor releases in advance).
+
+Unless you are **always** using a supported version of Node.js, we recommend defining the client dependency in your `package.json` with the `~` instead of `^`. In this way, you will lock the dependency to the minor release and not the major (for example, `~7.10.0` instead of `^7.10.0`).
+
+| Node.js Version | Node.js EOL date | End of support |
+| --------------- | ---------------- | ------------------- |
+| `8.x` | December 2019 | `7.11` (early 2021) |
+| `10.x` | April 2021 | `7.12` (mid 2021) |
+| `12.x` | April 2022 | `8.2` (early 2022) |
+| `14.x` | April 2023 | `8.8` (early 2023) |
+| `16.x` | September 2023 | `8.11` (late 2023) |
+| `18.x` | April 2025 | `9.2` (late 2025) |
+
+## Compatibility matrix [js-compatibility-matrix]
+
+Language clients are forward compatible, meaning that clients support communicating with greater or equal minor versions of {{es}} without breaking. It does not mean that the client automatically supports new features of newer {{es}} versions; it is only possible after a release of a new client version. For example, an 8.12 client version won’t automatically support the new features of the 8.13 version of {{es}}; the 8.13 client version is required for that. {{es}} language clients are only backwards compatible with default distributions and without guarantees made.
+
+| {{es}} Version | Client Version | Supported |
+| -------------- | -------------- | --------- |
+| `9.x` | `9.x` | `9.x` |
+| `8.x` | `8.x` | `8.x` |
+| `7.x` | `7.x` | `7.17` |
+| `6.x` | `6.x` | |
+| `5.x` | `5.x` | |
+
+### Browser [_browser]
+
+::::{warning}
+There is no official support for the browser environment. It exposes your {{es}} instance to everyone, which could lead to security issues. We recommend that you write a lightweight proxy that uses this client instead; you can see a proxy example [here](https://github.com/elastic/elasticsearch-js/tree/master/docs/examples/proxy).
+::::
diff --git a/docs/reference/integrations.md b/docs/reference/integrations.md
new file mode 100644
index 000000000..d301e4217
--- /dev/null
+++ b/docs/reference/integrations.md
@@ -0,0 +1,16 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/integrations.html
+---
+
+# Integrations [integrations]
+
+The Client offers the following integration options for you:
+
+* [Observability](/reference/observability.md)
+* [Transport](/reference/transport.md)
+* [TypeScript support](/reference/typescript.md)
+
+
+
+
diff --git a/docs/reference/msearch_examples.md b/docs/reference/msearch_examples.md
new file mode 100644
index 000000000..f411f8699
--- /dev/null
+++ b/docs/reference/msearch_examples.md
@@ -0,0 +1,63 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/msearch_examples.html
+---
+
+# MSearch [msearch_examples]
+
+The multi search API allows you to execute several search requests within the same API call.
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  const bulkResponse = await client.bulk({
+    refresh: true,
+    operations: [
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Ned Stark',
+        quote: 'Winter is coming.'
+      },
+
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Daenerys Targaryen',
+        quote: 'I am the blood of the dragon.'
+      },
+
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Tyrion Lannister',
+        quote: 'A mind needs books like a sword needs a whetstone.'
+      }
+    ]
+  })
+
+  if (bulkResponse.errors) {
+    console.log(bulkResponse)
+    process.exit(1)
+  }
+
+  const result = await client.msearch({
+    searches: [
+      { index: 'game-of-thrones' },
+      { query: { match: { character: 'Daenerys' } } },
+
+      { index: 'game-of-thrones' },
+      { query: { match: { character: 'Tyrion' } } }
+    ]
+  })
+
+  console.log(result.responses)
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/reference/observability.md b/docs/reference/observability.md
new file mode 100644
index 000000000..8c3fe211f
--- /dev/null
+++ b/docs/reference/observability.md
@@ -0,0 +1,417 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html
+---
+
+# Observability [observability]
+
+Several client features help you observe and measure {{es}} client usage. As of version 8.15.0, the JavaScript client provides native support for OpenTelemetry. You can send client usage data to OpenTelemetry endpoints without making changes to your JavaScript codebase.
+
+Rather than providing a default logger, the client offers an event emitter interface to hook into internal events like `request` and `response`. This allows you to log significant events or otherwise react to client usage. Because correlating events can be complex, the client provides a correlation ID system and other features.
+
+## OpenTelemetry [_opentelemetry]
+
+The client supports OpenTelemetry’s [zero-code instrumentation](https://opentelemetry.io/docs/zero-code/js/) to enable tracking each client request as an [OpenTelemetry span](https://opentelemetry.io/docs/concepts/signals/traces/#spans). These spans follow all of the [semantic OpenTelemetry conventions for {{es}}](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/) except for `db.query.text`.
+
+To start sending {{es}} trace data to your OpenTelemetry endpoint, instrument the client using the [Elastic Distribution of OpenTelemetry (EDOT) JavaScript](elastic-otel-node://reference/edot-node/index.md), or follow [OpenTelemetry’s zero-code instrumentation guide](https://opentelemetry.io/docs/zero-code/js/).
+
+### Turn off OpenTelemetry collection [disable-otel]
+
+As of `@elastic/transport` version 9.1.0—or 8.10.0 when using `@elastic/elasticsearch` 8.x—you can turn off OpenTelemetry tracing in several ways.
+
+To entirely turn off OpenTelemetry collection, you can provide a custom `Transport` at client instantiation time that sets `openTelemetry.enabled` to `false`:
+
+```typescript
+import { Transport } from '@elastic/transport'
+
+class MyTransport extends Transport {
+  async request(params, options = {}): Promise<any> {
+    options.openTelemetry = { enabled: false }
+    return super.request(params, options)
+  }
+}
+
+const client = new Client({
+  node: '...',
+  auth: { ... },
+  Transport: MyTransport
+})
+```
+
+Alternatively, you can export the environment variable `OTEL_ELASTICSEARCH_ENABLED=false`.
+
+To suppress tracing without turning off all OpenTelemetry collection, use the option `openTelemetry.suppressInternalInstrumentation = true` instead.
+
+If you would like to keep either option enabled by default, but want to turn them off for a single API call, pass `Transport` options as a second argument to any API function call:
+
+```typescript
+const response = await client.search({ ... }, {
+  openTelemetry: { enabled: false }
+})
+```
+
+## Events [_events]
+
+The client is an event emitter. This means that you can listen for its events to add additional logic to your code, without needing to change the client’s internals or how you use the client. You can find the events' names by accessing the `events` key of the client:
+
+```js
+const { events } = require('@elastic/elasticsearch')
+console.log(events)
+```
+
+The event emitter functionality can be useful if you want to log every request, response or error that is created by the client:
+
+```js
+const logger = require('my-logger')()
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+client.diagnostic.on('response', (err, result) => {
+  if (err) {
+    logger.error(err)
+  } else {
+    logger.info(result)
+  }
+})
+```
+
+### Event types
+
+The client emits the following events:
+
+#### `serialization`
+
+Emitted before starting serialization and compression. If you want to measure this phase duration, you should measure the time elapsed between this event and `request`.
+
+```js
+client.diagnostic.on("serialization", (err, result) => {
+  console.log(err, result)
+})
+```
+
+#### `request`
+
+Emitted before sending the actual request to {{es}} _(emitted multiple times in case of retries)_.
+
+```js
+client.diagnostic.on("request", (err, result) => {
+  console.log(err, result)
+})
+```
+
+#### `deserialization`
+
+Emitted before starting deserialization and decompression. If you want to measure this phase duration, you should measure the time elapsed between this event and `response`.
+
+This event might not be emitted in certain situations:
+
+* When `asStream` is set to true, the response is returned in its raw stream form before deserialization occurs
+* When a response is terminated early due to content length being too large
+* When a response is terminated early by an `AbortController`
+
+```js
+client.diagnostic.on("deserialization", (err, result) => {
+  console.log(err, result)
+})
+```
+
+#### `response`
+
+Emitted once the {{es}} response has been received and parsed.
+
+```js
+client.diagnostic.on("response", (err, result) => {
+  console.log(err, result)
+})
+```
+
+#### `sniff`
+
+Emitted when the client ends a sniffing request.
+
+```js
+client.diagnostic.on("sniff", (err, result) => {
+  console.log(err, result)
+})
+```
+
+#### `resurrect`
+
+Emitted if the client is able to resurrect a dead node.
+
+```js
+client.diagnostic.on("resurrect", (err, result) => {
+  console.log(err, result)
+})
+```
+
+The values of `result` in `serialization`, `request`, `deserialization`, `response` and `sniff` are:
+
+```ts
+body: any;
+statusCode: number | null;
+headers: anyObject | null;
+warnings: string[] | null;
+meta: {
+  context: any;
+  name: string;
+  request: {
+    params: TransportRequestParams;
+    options: TransportRequestOptions;
+    id: any;
+  };
+  connection: Connection;
+  attempts: number;
+  aborted: boolean;
+  sniff?: {
+    hosts: any[];
+    reason: string;
+  };
+};
+```
+
+While the `result` value in `resurrect` is:
+
+```ts
+strategy: string;
+isAlive: boolean;
+connection: Connection;
+name: string;
+request: {
+  id: any;
+};
+```
+
+### Events order [_events_order]
+
+The event order is described in the following graph; in some edge cases, the order is not guaranteed. You can find in [`test/acceptance/events-order.test.js`](https://github.com/elastic/elasticsearch-js/blob/main/test/acceptance/events-order.test.js) how the order changes based on the situation.
+
+```
+serialization
+  │
+  │ (serialization and compression happens between those two events)
+  │
+  └─▶ request
+        │
+        │ (actual time spent over the wire)
+        │
+        └─▶ deserialization
+              │
+              │ (deserialization and decompression happens between those two events)
+              │
+              └─▶ response
+```
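+
+Combining two of these events gives you a cheap way to measure end-to-end request duration. The following is a minimal sketch that correlates `serialization` and `response` by request id; the timing approach is illustrative, not part of the client:
+
+```js
+const starts = new Map()
+
+client.diagnostic.on('serialization', (err, result) => {
+  // remember when this request started, keyed by its correlation id
+  if (result != null) starts.set(result.meta.request.id, Date.now())
+})
+
+client.diagnostic.on('response', (err, result) => {
+  if (result != null && starts.has(result.meta.request.id)) {
+    const elapsed = Date.now() - starts.get(result.meta.request.id)
+    console.log(`request ${result.meta.request.id} took ${elapsed}ms`)
+    starts.delete(result.meta.request.id)
+  }
+})
+```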
+
+## Correlation ID [_correlation_id]
+
+Correlating events can be hard, especially if there are many events at the same time. The client offers you an automatic (and configurable) system to help you handle this problem.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+client.diagnostic.on('request', (err, result) => {
+  const { id } = result.meta.request
+  if (err) {
+    console.log({ error: err, reqId: id })
+  }
+})
+
+client.diagnostic.on('response', (err, result) => {
+  const { id } = result.meta.request
+  if (err) {
+    console.log({ error: err, reqId: id })
+  }
+})
+
+client.search({
+  index: 'my-index',
+  query: { match_all: {} }
+}).then(console.log, console.log)
+```
+
+By default the ID is an incremental integer, but you can configure it with the `generateRequestId` option:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' },
+  // it takes two parameters, the request parameters and options
+  generateRequestId: function (params, options) {
+    // your id generation logic
+    // must be synchronous
+    return 'id'
+  }
+})
+```
+
+You can also specify a custom ID per request:
+
+```js
+client.search({
+  index: 'my-index',
+  query: { match_all: {} }
+}, {
+  id: 'custom-id'
+}).then(console.log, console.log)
+```
+
+## Context object [_context_object]
+
+Sometimes, you might need to make some custom data available in your events; you can do that via the `context` option of a request:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+client.diagnostic.on('request', (err, result) => {
+  const { id } = result.meta.request
+  const { context } = result.meta
+  if (err) {
+    console.log({ error: err, reqId: id, context })
+  }
+})
+
+client.diagnostic.on('response', (err, result) => {
+  const { id } = result.meta.request
+  const { winter } = result.meta.context
+  if (err) {
+    console.log({ error: err, reqId: id, winter })
+  }
+})
+
+client.search({
+  index: 'my-index',
+  query: { match_all: {} }
+}, {
+  context: { winter: 'is coming' }
+}).then(console.log, console.log)
+```
+
+The context object can also be configured as a global option in the client configuration. If you provide both, the two context objects will be shallow merged, and the API level object will take precedence.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' },
+  context: { winter: 'is coming' }
+})
+
+client.diagnostic.on('request', (err, result) => {
+  const { id } = result.meta.request
+  const { context } = result.meta
+  if (err) {
+    console.log({ error: err, reqId: id, context })
+  }
+})
+
+client.diagnostic.on('response', (err, result) => {
+  const { id } = result.meta.request
+  const { winter } = result.meta.context
+  if (err) {
+    console.log({ error: err, reqId: id, winter })
+  }
+})
+
+client.search({
+  index: 'my-index',
+  query: { match_all: {} }
+}, {
+  context: { winter: 'has come' }
+}).then(console.log, console.log)
+```
+
+## Client name [_client_name]
+
+If you are using multiple instances of the client or if you are using multiple child clients _(which is the recommended way to have multiple instances of the client)_, you might need to recognize which client you are using. The `name` option helps you in this regard.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' },
+  name: 'parent-client' // defaults to 'elasticsearch-js'
+})
+
+const child = client.child({
+  name: 'child-client'
+})
+
+console.log(client.name, child.name)
+
+client.diagnostic.on('request', (err, result) => {
+  const { id } = result.meta.request
+  const { name } = result.meta
+  if (err) {
+    console.log({ error: err, reqId: id, name })
+  }
+})
+
+client.diagnostic.on('response', (err, result) => {
+  const { id } = result.meta.request
+  const { name } = result.meta
+  if (err) {
+    console.log({ error: err, reqId: id, name })
+  }
+})
+
+client.search({
+  index: 'my-index',
+  query: { match_all: {} }
+}).then(console.log, console.log)
+
+child.search({
+  index: 'my-index',
+  query: { match_all: {} }
+}).then(console.log, console.log)
+```
+
+## X-Opaque-Id support [_x_opaque_id_support]
+
+To improve observability, the client offers an easy way to configure the `X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this allows you to discover this identifier in the [deprecation logs](docs-content://deploy-manage/monitor/logging-configuration/update-elasticsearch-logging-levels.md#deprecation-logging), helps you with [identifying search slow log origin](elasticsearch://reference/elasticsearch/index-settings/slow-log.md) as well as [identifying running tasks](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks).
+
+The `X-Opaque-Id` should be configured in each request; to do that, you can use the `opaqueId` option, as you can see in the following example. The resulting header will be `{ 'X-Opaque-Id': 'my-search' }`.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+client.search({
+  index: 'my-index',
+  body: { foo: 'bar' }
+}, {
+  opaqueId: 'my-search'
+}).then(console.log, console.log)
+```
+
+Sometimes it may be useful to prefix all the `X-Opaque-Id` headers with a specific string, in case you need to identify a specific client or server. To do this, the client offers a top-level configuration option: `opaqueIdPrefix`. In the following example, the resulting header will be `{ 'X-Opaque-Id': 'proxy-client::my-search' }`.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' },
+  opaqueIdPrefix: 'proxy-client::'
+})
+
+client.search({
+  index: 'my-index',
+  body: { foo: 'bar' }
+}, {
+  opaqueId: 'my-search'
+}).then(console.log, console.log)
+```
diff --git a/docs/reference/reindex_examples.md b/docs/reference/reindex_examples.md
new file mode 100644
index 000000000..82955c8d9
--- /dev/null
+++ b/docs/reference/reindex_examples.md
@@ -0,0 +1,78 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/reindex_examples.html
+---
+
+# Reindex [reindex_examples]
+
+The `reindex` API extracts the document source from the source index and indexes the documents into the destination index. You can copy all documents to the destination index, reindex a subset of the documents, or update the source before reindexing it.
+
+In the following example we have a `game-of-thrones` index which contains different quotes of various characters. We want to create a new index only for the house Stark and remove the `house` field from the document source.
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Ned Stark',
+      quote: 'Winter is coming.',
+      house: 'stark'
+    }
+  })
+
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Arya Stark',
+      quote: 'A girl is Arya Stark of Winterfell. And I\'m going home.',
+      house: 'stark'
+    }
+  })
+
+  await client.index({
+    index: 'game-of-thrones',
+    refresh: true,
+    document: {
+      character: 'Tyrion Lannister',
+      quote: 'A Lannister always pays his debts.',
+      house: 'lannister'
+    }
+  })
+
+  await client.reindex({
+    wait_for_completion: true,
+    refresh: true,
+    source: {
+      index: 'game-of-thrones',
+      query: {
+        match: { house: 'stark' }
+      }
+    },
+    dest: {
+      index: 'stark-index'
+    },
+    script: {
+      lang: 'painless',
+      source: 'ctx._source.remove("house")'
+    }
+  })
+
+  const result = await client.search({
+    index: 'stark-index',
+    query: { match_all: {} }
+  })
+
+  console.log(result.hits.hits)
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/reference/scroll_examples.md b/docs/reference/scroll_examples.md
new file mode 100644
index 000000000..7f2eb4da7
--- /dev/null
+++ b/docs/reference/scroll_examples.md
@@ -0,0 +1,193 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/scroll_examples.html
+---
+
+# Scroll [scroll_examples]
+
+While a search request returns a single “page” of results, the scroll API can be used to retrieve large numbers of results (or even all results) from a single search request, in much the same way as you would use a cursor on a traditional database.
+
+Scrolling is not intended for real-time user requests, but rather for processing large amounts of data, for example in order to reindex the contents of one index into a new index with a different configuration.
+
+::::{note}
+The results that are returned from a scroll request reflect the state of the index at the time that the initial search request was made, like a snapshot in time. Subsequent changes to documents (index, update or delete) will only affect later search requests.
+::::
+
+
+In order to use scrolling, the initial search request should specify the scroll parameter in the query string, which tells {{es}} how long it should keep the “search context” alive.
+
+::::{note}
+Did you know that we provide a helper for sending scroll requests? You can find it [here](/reference/client-helpers.md#scroll-search-helper).
+::::
+
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  const allQuotes = []
+  const responseQueue = []
+
+  // Let's index some data!
+  const bulkResponse = await client.bulk({
+    // here we are forcing an index refresh,
+    // otherwise we will not get any result
+    // in the consequent search
+    refresh: true,
+    operations: [
+      // operation to perform
+      { index: { _index: 'game-of-thrones' } },
+      // the document to index
+      {
+        character: 'Ned Stark',
+        quote: 'Winter is coming.'
+      },
+
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Daenerys Targaryen',
+        quote: 'I am the blood of the dragon.'
+      },
+
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Tyrion Lannister',
+        quote: 'A mind needs books like a sword needs a whetstone.'
+      }
+    ]
+  })
+
+  if (bulkResponse.errors) {
+    console.log(bulkResponse)
+    process.exit(1)
+  }
+
+  // start things off by searching, setting a scroll timeout, and pushing
+  // our first response into the queue to be processed
+  const response = await client.search({
+    index: 'game-of-thrones',
+    // keep the search results "scrollable" for 30 seconds
+    scroll: '30s',
+    // for the sake of this example, we will get only one result per search
+    size: 1,
+    // filter the source to only include the quote field
+    _source: ['quote'],
+    query: {
+      match_all: {}
+    }
+  })
+
+  responseQueue.push(response)
+
+  while (responseQueue.length) {
+    const body = responseQueue.shift()
+
+    // collect the quotes from this response
+    body.hits.hits.forEach(function (hit) {
+      allQuotes.push(hit._source.quote)
+    })
+
+    // check to see if we have collected all of the quotes
+    if (body.hits.total.value === allQuotes.length) {
+      console.log('Every quote', allQuotes)
+      break
+    }
+
+    // get the next response if there are more quotes to fetch
+    responseQueue.push(
+      await client.scroll({
+        scroll_id: body._scroll_id,
+        scroll: '30s'
+      })
+    )
+  }
+}
+
+run().catch(console.log)
+```
+
+Another cool usage of the `scroll` API can be done with Node.js ≥ 10, by using async iteration!
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+// Scroll utility
+async function * scrollSearch (params) {
+  let response = await client.search(params)
+
+  while (true) {
+    const sourceHits = response.hits.hits
+
+    if (sourceHits.length === 0) {
+      break
+    }
+
+    for (const hit of sourceHits) {
+      yield hit
+    }
+
+    if (!response._scroll_id) {
+      break
+    }
+
+    response = await client.scroll({
+      scroll_id: response._scroll_id,
+      scroll: params.scroll
+    })
+  }
+}
+
+async function run () {
+  await client.bulk({
+    refresh: true,
+    operations: [
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Ned Stark',
+        quote: 'Winter is coming.'
+      },
+
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Daenerys Targaryen',
+        quote: 'I am the blood of the dragon.'
+      },
+
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Tyrion Lannister',
+        quote: 'A mind needs books like a sword needs a whetstone.'
+      }
+    ]
+  })
+
+  const params = {
+    index: 'game-of-thrones',
+    scroll: '30s',
+    size: 1,
+    _source: ['quote'],
+    query: {
+      match_all: {}
+    }
+  }
+
+  for await (const hit of scrollSearch(params)) {
+    console.log(hit._source)
+  }
+}
+
+run().catch(console.log)
```
+
diff --git a/docs/reference/search_examples.md b/docs/reference/search_examples.md
new file mode 100644
index 000000000..2847c1de0
--- /dev/null
+++ b/docs/reference/search_examples.md
@@ -0,0 +1,64 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/search_examples.html
+---
+
+# Search [search_examples]
+
+The `search` API allows you to execute a search query and get back search hits that match the query. The query can either be provided using a simple [query string as a parameter](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search), or using a [request body](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html).
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  // Let's start by indexing some data
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Ned Stark',
+      quote: 'Winter is coming.'
+    }
+  })
+
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Daenerys Targaryen',
+      quote: 'I am the blood of the dragon.'
+    }
+  })
+
+  await client.index({
+    index: 'game-of-thrones',
+    // here we are forcing an index refresh,
+    // otherwise we will not get any result
+    // in the consequent search
+    refresh: true,
+    document: {
+      character: 'Tyrion Lannister',
+      quote: 'A mind needs books like a sword needs a whetstone.'
+    }
+  })
+
+  // Let's search!
+  const result = await client.search({
+    index: 'game-of-thrones',
+    query: {
+      match: {
+        quote: 'winter'
+      }
+    }
+  })
+
+  console.log(result.hits.hits)
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/reference/sql_query_examples.md b/docs/reference/sql_query_examples.md
new file mode 100644
index 000000000..f2a955d27
--- /dev/null
+++ b/docs/reference/sql_query_examples.md
@@ -0,0 +1,69 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/sql_query_examples.html
+---
+
+# SQL [sql_query_examples]
+
+{{es}} SQL is an X-Pack component that allows SQL-like queries to be executed in real-time against {{es}}. Whether using the REST interface, command-line or JDBC, any client can use SQL to search and aggregate data natively inside {{es}}. One can think of {{es}} SQL as a translator, one that understands both SQL and {{es}} and makes it easy to read and process data in real time, at scale, by leveraging {{es}} capabilities.
+
+In the following example we will search for all the documents that have the field `house` equal to `stark`, log the result with the tabular view, and then manipulate the result to obtain an object that is easy to navigate.
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Ned Stark',
+      quote: 'Winter is coming.',
+      house: 'stark'
+    }
+  })
+
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Arya Stark',
+      quote: 'A girl is Arya Stark of Winterfell.
And I\'m going home.', + house: 'stark' + } + }) + + await client.index({ + index: 'game-of-thrones', + refresh: true, + document: { + character: 'Tyrion Lannister', + quote: 'A Lannister always pays his debts.', + house: 'lannister' + } + }) + + const result = await client.sql.query({ + query: "SELECT * FROM \"game-of-thrones\" WHERE house='stark'" + }) + + console.log(result) + + const data = result.rows.map(row => { + const obj = {} + for (let i = 0; i < row.length; i++) { + obj[result.columns[i].name] = row[i] + } + return obj + }) + + console.log(data) +} + +run().catch(console.log) +``` + diff --git a/docs/reference/suggest_examples.md b/docs/reference/suggest_examples.md new file mode 100644 index 000000000..70fbbd05a --- /dev/null +++ b/docs/reference/suggest_examples.md @@ -0,0 +1,68 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/suggest_examples.html +--- + +# Suggest [suggest_examples] + +The suggest feature suggests similar looking terms based on a provided text by using a suggester. *Parts of the suggest feature are still under development.* + +The suggest request part is defined alongside the query part in a `search` request. If the query part is left out, only suggestions are returned. + +```js +'use strict' + +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { id: '' }, + auth: { apiKey: 'base64EncodedKey' } +}) + +async function run () { + const bulkResponse = await client.bulk({ + refresh: true, + operations: [ + { index: { _index: 'game-of-thrones' } }, + { + character: 'Ned Stark', + quote: 'Winter is coming.' + }, + + { index: { _index: 'game-of-thrones' } }, + { + character: 'Daenerys Targaryen', + quote: 'I am the blood of the dragon.' + }, + + { index: { _index: 'game-of-thrones' } }, + { + character: 'Tyrion Lannister', + quote: 'A mind needs books like a sword needs a whetstone.' + } + ] + }) + + if (bulkResponse.errors) { + console.log(bulkResponse) + process.exit(1) + } + + const result = await client.search({ + index: 'game-of-thrones', + query: { + match: { quote: 'winter' } + }, + suggest: { + gotsuggest: { + text: 'winter', + term: { field: 'quote' } + } + } + }) + + console.log(result) +} + +run().catch(console.log) +``` + diff --git a/docs/reference/timeout-best-practices.md b/docs/reference/timeout-best-practices.md new file mode 100644 index 000000000..9938287c4 --- /dev/null +++ b/docs/reference/timeout-best-practices.md @@ -0,0 +1,13 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/timeout-best-practices.html +--- + +# Timeout best practices [timeout-best-practices] + +Starting in 9.0.0, this client is configured to not time out any HTTP request by default. {{es}} will always eventually respond to any request, even if it takes several minutes. Reissuing a request that it has not responded to yet can cause performance side effects. See the [official {{es}} recommendations for HTTP clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more information. + +Prior to 9.0, this client was configured by default to operate like many HTTP client libraries do, by using a relatively short (30 second) timeout on all requests sent to {{es}}, raising a `TimeoutError` when that time period elapsed without receiving a response. 
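+
+If you want to restore the previous behavior, you can set the `requestTimeout` option at client instantiation time. A minimal sketch, using the old 30-second default (the node URL and API key are placeholders):
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  node: 'https://localhost:9200/',
+  auth: { apiKey: 'base64EncodedKey' },
+  requestTimeout: 30000 // milliseconds
+})
+```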
+
+Setting the `requestTimeout` option to a millisecond value, as shown above, makes this client operate as it did prior to 9.0.
+
diff --git a/docs/reference/toc.yml b/docs/reference/toc.yml
new file mode 100644
index 000000000..9fbda6f58
--- /dev/null
+++ b/docs/reference/toc.yml
@@ -0,0 +1,35 @@
+toc:
+  - file: index.md
+  - file: getting-started.md
+  - file: installation.md
+  - file: connecting.md
+  - file: configuration.md
+    children:
+      - file: basic-config.md
+      - file: advanced-config.md
+      - file: child.md
+      - file: client-testing.md
+  - file: integrations.md
+    children:
+      - file: observability.md
+      - file: transport.md
+      - file: typescript.md
+  - file: api-reference.md
+  - file: examples.md
+    children:
+      - file: as_stream_examples.md
+      - file: bulk_examples.md
+      - file: exists_examples.md
+      - file: get_examples.md
+      - file: ignore_examples.md
+      - file: msearch_examples.md
+      - file: scroll_examples.md
+      - file: search_examples.md
+      - file: suggest_examples.md
+      - file: transport_request_examples.md
+      - file: sql_query_examples.md
+      - file: update_examples.md
+      - file: update_by_query_examples.md
+      - file: reindex_examples.md
+  - file: client-helpers.md
+  - file: timeout-best-practices.md
\ No newline at end of file
diff --git a/docs/reference/transport.md b/docs/reference/transport.md
new file mode 100644
index 000000000..4977a2890
--- /dev/null
+++ b/docs/reference/transport.md
@@ -0,0 +1,53 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/transport.html
+---
+
+# Transport [transport]
+
+This class is responsible for performing requests to {{es}} and handling errors; it also handles sniffing.
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+const { Transport } = require('@elastic/transport')
+
+class MyTransport extends Transport {
+  request (params, options) {
+    // your code
+  }
+}
+
+const client = new Client({
+  Transport: MyTransport
+})
+```
+
+Sometimes you need to inject a small snippet of your code and then continue to use the usual client code. In such cases, call `super.method`:
+
+```js
+class MyTransport extends Transport {
+  request (params, options) {
+    // your code
+    return super.request(params, options)
+  }
+}
+```
+
+## Supported content types [_supported_content_types]
+
+Depending on the `content-type` of the response, the transport will return the body as different types:
+
+| Content-Type | JavaScript type |
+| --- | --- |
+| `application/json` | `object` |
+| `text/plain` | `string` |
+| `application/vnd.elasticsearch+json` | `object` |
+| `application/vnd.mapbox-vector-tile` | `Buffer` |
+| `application/vnd.apache.arrow.stream` | `Buffer` |
+| `application/vnd.elasticsearch+arrow+stream` | `Buffer` |
+| `application/smile` | `Buffer` |
+| `application/vnd.elasticsearch+smile` | `Buffer` |
+| `application/cbor` | `Buffer` |
+| `application/vnd.elasticsearch+cbor` | `Buffer` |
+
+
diff --git a/docs/reference/transport_request_examples.md b/docs/reference/transport_request_examples.md
new file mode 100644
index 000000000..1558d0403
--- /dev/null
+++ b/docs/reference/transport_request_examples.md
@@ -0,0 +1,76 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/transport_request_examples.html
+---
+
+# transport.request [transport_request_examples]
+
+Sometimes you need to communicate with {{es}} by using an API that is not supported by the client. In that case you can call `client.transport.request` directly; it is the internal utility that the client uses to communicate with {{es}} when you use an API method.
+
+::::{note}
+When using the `transport.request` method you must provide all the parameters needed to perform an HTTP call, such as `method`, `path`, `querystring`, and `body`.
+::::
+
+
+::::{tip}
+If you find yourself using this method too often, consider using `client.extend`, which will make your code cleaner and easier to maintain.
+::::
+
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  const bulkResponse = await client.bulk({
+    refresh: true,
+    operations: [
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Ned Stark',
+        quote: 'Winter is coming.'
+      },
+
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Daenerys Targaryen',
+        quote: 'I am the blood of the dragon.'
+      },
+
+      { index: { _index: 'game-of-thrones' } },
+      {
+        character: 'Tyrion Lannister',
+        quote: 'A mind needs books like a sword needs a whetstone.'
+      }
+    ]
+  })
+
+  if (bulkResponse.errors) {
+    console.log(bulkResponse)
+    process.exit(1)
+  }
+
+  const response = await client.transport.request({
+    method: 'POST',
+    path: '/game-of-thrones/_search',
+    body: {
+      query: {
+        match: {
+          quote: 'winter'
+        }
+      }
+    },
+    querystring: {}
+  })
+
+  console.log(response)
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/reference/typescript.md b/docs/reference/typescript.md
new file mode 100644
index 000000000..d7cff4ed6
--- /dev/null
+++ b/docs/reference/typescript.md
@@ -0,0 +1,81 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html
+---
+
+# TypeScript support [typescript]
+
+The client offers first-class TypeScript support, shipping a complete set of type definitions for {{es}}'s API surface.
+
+The types are not 100% complete yet. Some APIs are missing (the newest ones, e.g. EQL), and others may contain some errors, but we are continuously pushing fixes & improvements. Contribute type fixes and improvements to the [elasticsearch-specification GitHub repository](https://github.com/elastic/elasticsearch-specification).
+
+::::{note}
+The client is developed against the [latest](https://www.npmjs.com/package/typescript?activeTab=versions) version of TypeScript. Furthermore, unless you have set `skipLibCheck` to `true`, you should configure `esModuleInterop` to `true`.
+::::
+
+## Example [_example]
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+interface Document {
+  character: string
+  quote: string
+}
+
+async function run () {
+  // Let's start by indexing some data
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Ned Stark',
+      quote: 'Winter is coming.'
+    }
+  })
+
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Daenerys Targaryen',
+      quote: 'I am the blood of the dragon.'
+    }
+  })
+
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Tyrion Lannister',
+      quote: 'A mind needs books like a sword needs a whetstone.'
+    }
+  })
+
+  // here we are forcing an index refresh, otherwise we will not
+  // get any result in the subsequent search
+  await client.indices.refresh({ index: 'game-of-thrones' })
+
+  // Let's search!
+  const result = await client.search<Document>({
+    index: 'game-of-thrones',
+    query: {
+      match: { quote: 'winter' }
+    }
+  })
+
+  console.log(result.hits.hits)
+}
+
+run().catch(console.log)
+```
+
+## Request & Response types [_request_response_types]
+
+You can import the full TypeScript request & response definitions as follows:
+
+```ts
+import { estypes } from '@elastic/elasticsearch'
+```
diff --git a/docs/reference/update_by_query_examples.md b/docs/reference/update_by_query_examples.md
new file mode 100644
index 000000000..0c61c0617
--- /dev/null
+++ b/docs/reference/update_by_query_examples.md
@@ -0,0 +1,61 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/update_by_query_examples.html
+---
+
+# Update By Query [update_by_query_examples]
+
+The simplest usage of `_update_by_query` performs an update on every document in the index without changing the source. This is useful for picking up a new property or some other online mapping change.
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  await client.index({
+    index: 'game-of-thrones',
+    document: {
+      character: 'Ned Stark',
+      quote: 'Winter is coming.'
+    }
+  })
+
+  await client.index({
+    index: 'game-of-thrones',
+    refresh: true,
+    document: {
+      character: 'Arya Stark',
+      quote: 'A girl is Arya Stark of Winterfell. And I\'m going home.'
+    }
+  })
+
+  await client.updateByQuery({
+    index: 'game-of-thrones',
+    refresh: true,
+    script: {
+      lang: 'painless',
+      source: 'ctx._source["house"] = "stark"'
+    },
+    query: {
+      match: {
+        character: 'stark'
+      }
+    }
+  })
+
+  const result = await client.search({
+    index: 'game-of-thrones',
+    query: { match_all: {} }
+  })
+
+  console.log(result.hits.hits)
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/reference/update_examples.md b/docs/reference/update_examples.md
new file mode 100644
index 000000000..5de58586b
--- /dev/null
+++ b/docs/reference/update_examples.md
@@ -0,0 +1,93 @@
+---
+mapped_pages:
+  - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/update_examples.html
+---
+
+# Update [update_examples]
+
+The update API allows you to update a specific document using a script. In the following example, we will index a document that also tracks how many times a character has said the given quote, and then update the `times` field.
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  await client.index({
+    index: 'game-of-thrones',
+    id: '1',
+    document: {
+      character: 'Ned Stark',
+      quote: 'Winter is coming.',
+      times: 0
+    }
+  })
+
+  await client.update({
+    index: 'game-of-thrones',
+    id: '1',
+    script: {
+      lang: 'painless',
+      source: 'ctx._source.times++'
+      // you can also use parameters
+      // source: 'ctx._source.times += params.count',
+      // params: { count: 1 }
+    }
+  })
+
+  const document = await client.get({
+    index: 'game-of-thrones',
+    id: '1'
+  })
+
+  console.log(document)
+}
+
+run().catch(console.log)
+```
+
+With the update API, you can also run a partial update of a document.
+
+```js
+'use strict'
+
+const { Client } = require('@elastic/elasticsearch')
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' }
+})
+
+async function run () {
+  await client.index({
+    index: 'game-of-thrones',
+    id: '1',
+    document: {
+      character: 'Ned Stark',
+      quote: 'Winter is coming.',
+      isAlive: true
+    }
+  })
+
+  await client.update({
+    index: 'game-of-thrones',
+    id: '1',
+    doc: {
+      isAlive: false
+    }
+  })
+
+  const document = await client.get({
+    index: 'game-of-thrones',
+    id: '1'
+  })
+
+  console.log(document)
+}
+
+run().catch(console.log)
+```
+
diff --git a/docs/release-notes/breaking-changes.md b/docs/release-notes/breaking-changes.md
new file mode 100644
index 000000000..1829923b8
--- /dev/null
+++ b/docs/release-notes/breaking-changes.md
@@ -0,0 +1,55 @@
+---
+navigation_title: "Breaking changes"
+---
+
+# Elasticsearch JavaScript Client breaking changes [elasticsearch-javascript-client-breaking-changes]
+
+Breaking changes can impact your Elastic applications, potentially disrupting normal operations. Before you upgrade, carefully review the Elasticsearch JavaScript Client breaking changes and take the necessary steps to mitigate any issues. To learn how to upgrade, check [Upgrade](docs-content://deploy-manage/upgrade.md).
+
+% ## Next version [elasticsearch-javascript-client-versionnext-breaking-changes]
+
+% ::::{dropdown} Title of breaking change
+% Description of the breaking change.
+% For more information, check [PR #](PR link).
+% **Impact**<br> Impact of the breaking change.
+% **Action**<br> Steps for mitigating the impact of the breaking change.
+% ::::
+
+## 9.0.0 [elasticsearch-javascript-client-900-breaking-changes]
+
+::::{dropdown} Changes to the optional body property
+
+In 8.x, every API function had a `body` property that provided a place to put arbitrary values that should go in the HTTP request body, even if they were not noted in the specification or documentation. In 9.0, each API function still includes an optional `body` property, but TypeScript's type checker will disallow properties that should go in the root of the object. A `querystring` parameter has also been added that behaves the same as `body`, but inserts its values into the request querystring.
+
+**Impact**<br> Some adjustments to API calls may be necessary for code that used a `body` property in 8.x, especially to appease the TypeScript type checker, but it should not have any impact on any code that was not using a `body` property. The `estypesWithBody` export and `typesWithBodyKey` module are no longer available.
+
+**Action**<br> Check existing code for use of the `body` property, and move any properties that should be in the root object according to the API function's request type definition. If using TypeScript, the TypeScript type checker will surface most of these issues for you. Also look for any imports of `estypesWithBody` or `typesWithBodyKey` and update them to `estypes` and `types`, respectively.
+::::
+
+::::{dropdown} Changes to API parameter collation into an HTTP request
+
+The logic for where each parameter in an API function call should be added to its HTTP request has been updated:
+
+1. If recognized as a `body` parameter according to the Elasticsearch specification, put it in the JSON body
+2. If recognized as a `path` parameter, put it in the URL path
+3. If recognized as a `query` parameter or a "common" query parameter (e.g. `pretty`, `error_trace`), put it in the querystring
+4. If not recognized, and this API accepts a JSON body, put it in the JSON body
+5. If not recognized and this API does not accept a JSON body, put it in the querystring
+
+The first two steps are identical in 8.x. The final three steps replace the logic from 8.x that put all unrecognized parameters in the querystring.
+
+**Impact**<br> Some parameters that were sent via querystring to Elasticsearch may be sent in the JSON request body, and vice versa.
+
+**Action**<br> If Elasticsearch sends back an error response due to a request not being valid, verify with the client's TypeScript type definitions, or via the docs, that the parameters your code passes are correct.
+::::
+
+::::{dropdown} Removal of the default 30-second timeout on all API calls
+
+The default 30-second timeout on all HTTP requests sent to Elasticsearch has been dropped in favor of having no timeout set at all. The previous behavior can still be restored by setting the `requestTimeout` option.
+
+See pull request [#2573](https://github.com/elastic/elasticsearch-js/pull/2573) for more information.
+
+**Impact**<br> Requests to Elasticsearch that used to time out after 30 seconds will now wait for as long as it takes for Elasticsearch to respond.
+
+**Action**<br> In environments where it is not ideal to wait for an API response indefinitely, manually setting the `requestTimeout` option when instantiating the client still works as it did in 8.x.
+::::
diff --git a/docs/release-notes/deprecations.md b/docs/release-notes/deprecations.md
new file mode 100644
index 000000000..a137fb0cf
--- /dev/null
+++ b/docs/release-notes/deprecations.md
@@ -0,0 +1,21 @@
+---
+navigation_title: "Deprecations"
+---
+
+# Elasticsearch JavaScript Client deprecations [elasticsearch-javascript-client-deprecations]
+Over time, certain Elastic functionality becomes outdated and is replaced or removed. To help with the transition, Elastic deprecates functionality for a period before removal, giving you time to update your applications.
+
+Review the deprecated functionality for the Elasticsearch JavaScript Client. While deprecations have no immediate impact, we strongly encourage you to update your implementation after you upgrade. To learn how to upgrade, check out [Upgrade](docs-content://deploy-manage/upgrade.md).
+
+## 9.0.0 [elasticsearch-javascript-client-900-deprecations]
+
+_No deprecations_
+
+% ## Next version
+
+% ::::{dropdown} Deprecation title
+% Description of the deprecation.
+% For more information, check [PR #](PR link).
+% **Impact**<br> Impact of deprecation.
+% **Action**<br>
Steps for mitigating deprecation impact. +% :::: \ No newline at end of file diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md new file mode 100644 index 000000000..0a70b6214 --- /dev/null +++ b/docs/release-notes/index.md @@ -0,0 +1,91 @@ +--- +navigation_title: "Elasticsearch JavaScript Client" +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/changelog-client.html +--- + +# Elasticsearch JavaScript Client release notes [elasticsearch-javascript-client-release-notes] + +Review the changes, fixes, and more in each version of Elasticsearch JavaScript Client. + +To check for security updates, go to [Security announcements for the Elastic stack](https://discuss.elastic.co/c/announcements/security-announcements/31). + +% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections. + +% ## version.next [elasticsearch-javascript-client-next-release-notes] + +% ### Features and enhancements [elasticsearch-javascript-client-next-features-enhancements] +% \* + +% ### Fixes [elasticsearch-javascript-client-next-fixes] +% \* + +## 9.2.0 [elasticsearch-javascript-client-9.2.0-release-notes] + +### Features and enhancements [elasticsearch-javascript-client-9.2.0-features-enhancements] + +- **Compatibility with Elasticsearch 9.2:** All changes and additions to Elasticsearch APIs for its 9.2 release are reflected in this release. + +- **Accepted parameter names added to transport request metadata:** All requests sent through `@elastic/transport` already included some metadata about the request (API name, path parameters). An `acceptedParams` array has been added that includes the names of all parameters that an API supports. This helps support more flexible pre-flight request modifications made by custom transports. + +## 9.1.1 [elasticsearch-javascript-client-9.1.1-release-notes] + +### Fixes [elasticsearch-javascript-client-9.1.1-fixes] + +- **Propagate telemetry disabling option to transport:** an upcoming version of `@elastic/transport` will include the `x-elastic-client-meta` HTTP header that is used to capture some basic client telemetry. This change ensures the client's `enableMetaHeader` setting, which disables collecting this telemetry, is propagated to the transport. + +## 9.1.0 [elasticsearch-javascript-client-9.1.0-release-notes] + +### Features and enhancements [elasticsearch-javascript-client-9.1.0-features-enhancements] + +- **Compatibility with Elasticsearch 9.1:** All changes and additions to Elasticsearch APIs for its 9.1 release are reflected in this release. + +### Fixes [elasticsearch-javascript-client-9.1.0-fixes] + +- **Deep merge nested options on client instantiation:** If custom values for `redaction` and `headers` options were set by the user during `Client` instantiation, nested default values would be dropped rather than deep-merged. This has been fixed. + +## 9.0.4 + +### Fixes [elasticsearch-javascript-client-9.0.4-fixes] + +- **Propagate telemetry disabling option to transport:** an upcoming version of `@elastic/transport` will include the `x-elastic-client-meta` HTTP header that is used to capture some basic client telemetry. This change ensures the client's `enableMetaHeader` setting, which disables collecting this telemetry, is propagated to the transport. 
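+
+For illustration, the telemetry switch mentioned above is a plain constructor option. A minimal sketch, with placeholder credentials:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' },
+  // opt out of the x-elastic-client-meta telemetry header; with this fix
+  // the setting is propagated to @elastic/transport as well
+  enableMetaHeader: false
+})
+```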
+
+## 9.0.3
+
+### Fixes [elasticsearch-javascript-client-9.0.3-fixes]
+
+- **Improved compatibility with Elasticsearch 9.0:** Several fixes and improvements have been made to APIs and TypeScript type definitions to better reflect the Elasticsearch 9.0 specification.
+
+## 9.0.2
+
+### Fixes [elasticsearch-javascript-client-9.0.2-fixes]
+
+- **Remove dangling references to `typesWithBodyKey`:** the `typesWithBodyKey.ts` file and `estypesWithBody` export were removed in 9.0.0 but were still being referenced in the `index.d.ts` file that declares TypeScript types. This reference has been removed.
+
+## 9.0.1
+
+### Fixes [elasticsearch-javascript-client-9.0.1-fixes]
+
+- **Reinstate `nodeFilter` and node `roles` feature:** The docs note a `nodeFilter` option on the client that will, by default, filter the nodes based on any `roles` values that are set at instantiation. At some point, this functionality was partially disabled. This brings the feature back, ensuring that it matches what the documentation has said it does all along.
+
+- **Ensure Apache Arrow ES|QL helper uses async iterator:** the [`esql.toArrowReader()` helper function](/reference/client-helpers.md#_toarrowreader) was trying to return `RecordBatchStreamReader`—a synchronous iterator—despite the fact that the `apache-arrow` package was, in most cases, automatically coercing it to `AsyncRecordBatchStreamReader`, its asynchronous counterpart. It is now always returned as an async iterator.
+
+## 9.0.0 [elasticsearch-javascript-client-9.0.0-release-notes]
+
+### Features and enhancements [elasticsearch-javascript-client-9.0.0-features-enhancements]
+
+- **Compatibility with Elasticsearch 9.0:** All changes and additions to Elasticsearch APIs for its 9.0 release are reflected in this release.
+- **Serverless client merged in:** the `@elastic/elasticsearch-serverless` client is being deprecated, and its functionality has been merged back into this client. This should have zero impact on the way the client works by default, except that a new `serverMode` option has been added. When it's explicitly set to `"serverless"` by a user, a few default settings and behaviors are changed (see the sketch after this list):
+
+  - turns off sniffing and ignores any sniffing-related options
+  - ignores all nodes passed in config except the first one, and ignores any node filtering and selecting options
+  - enables compression and `TLSv1_2_method` (same as when configured for Elastic Cloud)
+  - adds an `elastic-api-version` HTTP header to all requests
+  - uses `CloudConnectionPool` by default instead of `WeightedConnectionPool`
+  - turns off vendored `content-type` and `accept` headers in favor of standard MIME types
+
+  Docstrings for types that differ between stack and serverless have also been updated to indicate when that is the case.
+
+- **Improved Cloud ID parsing:** when using a Cloud ID as the `cloud` parameter to instantiate the client, that ID was assumed to be in the correct format. New assertions have been added to verify that format and throw a `ConfigurationError` if it is invalid. See [#2694](https://github.com/elastic/elasticsearch-js/issues/2694).
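+
+For illustration, opting into the serverless defaults described above is a single constructor option. A minimal sketch, with placeholder credentials:
+
+```js
+const { Client } = require('@elastic/elasticsearch')
+
+const client = new Client({
+  cloud: { id: '<cloud-id>' },
+  auth: { apiKey: 'base64EncodedKey' },
+  // switches on the serverless defaults listed above
+  serverMode: 'serverless'
+})
+```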
+ +% ### Fixes [elasticsearch-javascript-client-9.0.0-fixes] diff --git a/docs/release-notes/known-issues.md b/docs/release-notes/known-issues.md new file mode 100644 index 000000000..e35bd7826 --- /dev/null +++ b/docs/release-notes/known-issues.md @@ -0,0 +1,24 @@ +--- +navigation_title: "Known issues" + +--- + +# Elasticsearch JavaScript Client known issues [elasticsearch-javascript-client-known-issues] + +## 9.0.0 + +_No known issues_ + +% Use the following template to add entries to this page. + +% :::{dropdown} Title of known issue +% **Details** +% On [Month/Day/Year], a known issue was discovered that [description of known issue]. + +% **Workaround** +% Workaround description. + +% **Resolved** +% On [Month/Day/Year], this issue was resolved. + +% ::: \ No newline at end of file diff --git a/docs/release-notes/toc.yml b/docs/release-notes/toc.yml new file mode 100644 index 000000000..a41006794 --- /dev/null +++ b/docs/release-notes/toc.yml @@ -0,0 +1,5 @@ +toc: + - file: index.md + - file: known-issues.md + - file: breaking-changes.md + - file: deprecations.md \ No newline at end of file diff --git a/docs/testing.asciidoc b/docs/testing.asciidoc deleted file mode 100644 index 2e687cacb..000000000 --- a/docs/testing.asciidoc +++ /dev/null @@ -1,152 +0,0 @@ -[[client-testing]] -== Testing - -Testing is one of the most important parts of developing an application. -The client is very flexible when it comes to testing and is compatible with -most testing frameworks (such as https://www.npmjs.com/package/ava[`ava`], -which is used in the examples below). - -If you are using this client, you are very likely working with Elasticsearch, -and one of the first issues you will face is how to test your application. -A perfectly valid solution is to use the real Elasticsearch instance for -testing your application, but you would be doing an integration test, -while you want a unit test. -There are many ways to solve this problem, you could create the database -with docker, or use an in-memory compatible one, but if you are writing -unit tests that can be easily parallelized this will become quite uncomfortable. -A different way of improving your testing experience while doing unit tests -is to use a mock. - -The client is designed to be easy to extend and adapt to your needs. -Thanks to its internal architecture it allows you to change some specific -components while keeping the rest of it working as usual. -Each Elasticsearch official client is composed of the following components: - -* `API layer`: every Elasticsearch API that you can call -* `Transport`: a component that takes care of preparing a request before sending it and handling all the retry and sniffing strategies -* `ConnectionPool`: Elasticsearch is a cluster and might have multiple nodes, the * `ConnectionPool` takes care of them -* `Serializer`: A class with all the serialization strategies, from the basic JSON to the new line delimited JSON. -* `Connection`: The actual HTTP library. - -The best way to mock Elasticsearch with the official clients is to replace -the `Connection` component since it has very few responsibilities and -it does not interact with other internal components other than getting -requests and returning responses. 
- -=== `@elastic/elasticsearch-mock` - -Writing each time a mock for your test can be annoying and error-prone, -so we have built a simple yet powerful mocking library specifically designed -for this client, and you can install it with the following command: - -[source,sh] ----- -npm install @elastic/elasticsearch-mock --save-dev ----- - -With this library you can easily create custom mocks for any request you can -send to Elasticsearch. It offers a simple and intuitive API and it mocks only -the HTTP layer, leaving the rest of the client working as usual. - -Before showing all of its features, and what you can do with it, let’s see an example: - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const Mock = require('@elastic/elasticsearch-mock') - -const mock = new Mock() -const client = new Client({ - node: 'http://localhost:9200/', - Connection: mock.getConnection() -}) - -mock.add({ - method: 'GET', - path: '/' -}, () => { - return { status: 'ok' } -}) - -client.info(console.log) ----- - -As you can see it works closely with the client itself, once you have created -a new instance of the mock library you just need to call the mock.getConnection() -method and pass its result to the Connection option of the client. -From now on, every request will be handled by the mock library, and the HTTP -layer will never be touched. As a result, your test will be significantly faster -and you will be able to easily parallelize them! - -The library allows you to write both “strict” and “loose” mocks, which means -that you can write a mock that will handle a very specific request or be looser -and handle a group of request, let’s see this in action: - -[source,js] ----- -mock.add({ - method: 'POST', - path: '/indexName/_search' -}, () => { - return { - hits: { - total: { value: 1, relation: 'eq' }, - hits: [{ _source: { baz: 'faz' } }] - } - } -}) - -mock.add({ - method: 'POST', - path: '/indexName/_search', - body: { query: { match: { foo: 'bar' } } } -}, () => { - return { - hits: { - total: { value: 0, relation: 'eq' }, - hits: [] - } - } -}) ----- - -In the example above every search request will get the first response, -while every search request that uses the query described in the second mock, -will get the second response. - -You can also specify dynamic paths: - -[source,js] ----- -mock.add({ - method: 'GET', - path: '/:index/_count' -}, () => { - return { count: 42 } -}) - -client.count({ index: 'foo' }, console.log) // => { count: 42 } -client.count({ index: 'bar' }, console.log) // => { count: 42 } ----- - -And wildcards are supported as well. - -Another very interesting use case is the ability to create a test that randomly -fails to see how your code reacts to failures: - -[source,js] ----- -mock.add({ - method: 'GET', - path: '/:index/_count' -}, () => { - if (Math.random() > 0.8) { - return ResponseError({ body: {}, statusCode: 500 }) - } else { - return { count: 42 } - } -}) ----- - -We have seen how simple is mocking Elasticsearch and testing your application, -you can find many more features and examples in the https://github.com/elastic/elasticsearch-js-mock[module documentation]. \ No newline at end of file diff --git a/docs/typescript.asciidoc b/docs/typescript.asciidoc deleted file mode 100644 index 476292eed..000000000 --- a/docs/typescript.asciidoc +++ /dev/null @@ -1,149 +0,0 @@ -[[typescript]] - -== TypeScript support - -The client offers a first-class support for TypeScript, since it ships the type -definitions for every exposed API. 
- -NOTE: If you are using TypeScript you will be required to use _snake_case_ style -to define the API parameters instead of _camelCase_. - -By default event API uses https://www.typescriptlang.org/docs/handbook/generics.html[generics] to specify the requets and response bodies and the `meta.context`. Currently we can't provide those definitions, but we are working to improve this situation. - -You can't fid a partial definition of the request types by importing `RequestParams`, which it is used by default in the client and accepts a body (when needed) as a generic to provide a better specification. - -The body defaults to `RequestBody` and `RequestNDBody`, which are defined as follows: - -[source,ts] ----- -type RequestBody> = T | string | Buffer | ReadableStream -type RequestNDBody[]> = T | string | string[] | Buffer | ReadableStream ----- - -You can specify the response and request body in each API as follows: - -[source,ts] ----- -const response = await client.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } -}) - -console.log(response.body) ----- - -You don't have to specify all the generics, but the order must be respected. - - -=== A complete example - -[source,ts] ----- -import { - Client, - // Object that contains the type definitions of every API method - RequestParams, - // Interface of the generic API response - ApiResponse, -} from '@elastic/elasticsearch' - -const client = new Client({ node: 'http://localhost:9200/' }) - -// Define the type of the body for the Search request -interface SearchBody { - query: { - match: { foo: string } - } -} - -// Complete definition of the Search response -interface ShardsResponse { - total: number; - successful: number; - failed: number; - skipped: number; -} - -interface Explanation { - value: number; - description: string; - details: Explanation[]; -} - -interface SearchResponse { - took: number; - timed_out: boolean; - _scroll_id?: string; - _shards: ShardsResponse; - hits: { - total: number; - max_score: number; - hits: Array<{ - _index: string; - _type: string; - _id: string; - _score: number; - _source: T; - _version?: number; - _explanation?: Explanation; - fields?: any; - highlight?: any; - inner_hits?: any; - matched_queries?: string[]; - sort?: string[]; - }>; - }; - aggregations?: any; -} - -// Define the interface of the source object -interface Source { - foo: string -} - -async function run () { - // All of the examples below are valid code, by default, - // the request body will be `RequestBody` and response will be `Record`. 
- const response = await client.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - // body here is `ResponseBody` - console.log(response.body) - - // The first generic is the response body - const response = await client.search>({ - index: 'test', - // Here the body must follow the `RequestBody` interface - body: { - query: { - match: { foo: 'bar' } - } - } - }) - // body here is `SearchResponse` - console.log(response.body) - - const response = await client.search, SearchBody>({ - index: 'test', - // Here the body must follow the `SearchBody` interface - body: { - query: { - match: { foo: 'bar' } - } - } - }) - // body here is `SearchResponse` - console.log(response.body) -} - -run().catch(console.log) ----- diff --git a/docs/usage.asciidoc b/docs/usage.asciidoc deleted file mode 100644 index c279b886d..000000000 --- a/docs/usage.asciidoc +++ /dev/null @@ -1,225 +0,0 @@ -[[client-usage]] -== Usage - -Using the client is straightforward, it supports all the public APIs of {es}, -and every method exposes the same signature. - - -[source,js] ----- -const { Client } = require('@elastic/elasticsearch') -const client = new Client({ node: 'http://localhost:9200/' }) - -// promise API -const result = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}) - -// callback API -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, result) => { - if (err) console.log(err) -}) ----- - -The returned value of every API call is formed as follows: - -[source,ts] ----- -{ - body: object | boolean - statusCode: number - headers: object - warnings: [string], - meta: object -} ----- - -NOTE: The body is a boolean value when you use `HEAD` APIs. - -The above value is returned even if there is an error during the execution of -the request, this means that you can safely use the -https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Destructuring_assignment[destructuring assignment]. - -The `meta` key contains all the information about the request, such as attempt, -options, and the connection that has been used. - -[source,js] ----- -// promise API -const { body } = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}) - -// callback API -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, (err, { body }) => { - if (err) console.log(err) -}) ----- - - -=== Aborting a request - -If needed, you can abort a running request by calling the `request.abort()` method returned by the API. - -CAUTION: If you abort a request, the request will fail with a `RequestAbortedError`. - - -[source,js] ----- -const request = client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - ignore: [404], - maxRetries: 3 -}, (err, result) => { - if (err) { - console.log(err) // RequestAbortedError - } else { - console.log(result) - } -}) - -request.abort() ----- - -The same behavior is valid for the promise style API as well. 
-[source,js] ----- -const request = client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - ignore: [404], - maxRetries: 3 -}) - -request - .then(result => console.log(result)) - .catch(err => console.log(err)) // RequestAbortedError - -request.abort() ----- - - -=== Request specific options -If needed you can pass request specific options in a second object: - -[source,js] ----- -// promise API -const result = await client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - ignore: [404], - maxRetries: 3 -}) - -// calback API -client.search({ - index: 'my-index', - body: { foo: 'bar' } -}, { - ignore: [404], - maxRetries: 3 -}, (err, { body }) => { - if (err) console.log(err) -}) ----- - - -The supported request specific options are: -[cols=2*] -|=== -|`ignore` -|`[number]` -  HTTP status codes which should not be considered errors for this request. + -_Default:_ `null` - -|`requestTimeout` -|`number` - Max request timeout for the request, it overrides the client default. + -_Default:_ `30000` - -|`maxRetries` -|`number` - Max number of retries for the request, it overrides the client default. + -_Default:_ `3` - -|`compression` -|`string, boolean` - Enables body compression for the request. + -_Options:_ `false`, `'gzip'` + -_Default:_ `false` - -|`asStream` -|`boolean` - Instead of getting the parsed body back, you get the raw Node.js stream of data. + -_Default:_ `false` - -|`headers` -|`object` - Custom headers for the request. + -_Default:_ `null` - -|`querystring` -|`object` - Custom querystring for the request. + -_Default:_ `null` - -|`id` -|`any` - Custom request id. _(overrides the top level request id generator)_ + -_Default:_ `null` - -|`context` -|`any` - Custom object per request. _(you can use it to pass data to the clients events)_ + -_Default:_ `null` -|=== - - -=== Error handling - -The client exposes a variety of error objects that you can use to enhance your -error handling. You can find all the error objects inside the `errors` key in -the client. - -[source,js] ----- -const { errors } = require('@elastic/elasticsearch') -console.log(errors) ----- - - -You can find the errors exported by the client in the table below. - -[cols=2*] -|=== -|`ElasticsearchClientError` -|Every error inherits from this class, it is the basic error generated by the client. - -|`TimeoutError` -|Generated when a request exceeds the `requestTimeout` option. - -|`ConnectionError` -|Generated when an error occurs during the request, it can be a connection error or a malformed stream of data. - -|`RequestAbortedError` -|Generated if the user calls the `request.abort()` method. - -|`NoLivingConnectionsError` -|Given the configuration, the ConnectionPool was not able to find a usable Connection for this request. - -|`SerializationError` -|Generated if the serialization fails. - -|`DeserializationError` -|Generated if the deserialization fails. - -|`ConfigurationError` -|Generated if there is a malformed configuration or parameter. - -|`ResponseError` -|Generated when in case of a `4xx` or `5xx` response. -|=== diff --git a/index.d.ts b/index.d.ts index b4d5c60c0..8d48439a1 100644 --- a/index.d.ts +++ b/index.d.ts @@ -1,2581 +1,13 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -/// - -import { ConnectionOptions as TlsConnectionOptions } from 'tls'; -import Transport, { - ApiError, - ApiResponse, - RequestEvent, - TransportRequestParams, - TransportRequestOptions, - nodeFilterFn, - nodeSelectorFn, - generateRequestIdFn, - TransportRequestCallback, - TransportRequestPromise, - RequestBody, - RequestNDBody -} from './lib/Transport'; -import { URL } from 'url'; -import Connection, { AgentOptions, agentFn } from './lib/Connection'; -import { - ConnectionPool, - BaseConnectionPool, - CloudConnectionPool, - ResurrectEvent, - BasicAuth, - ApiKeyAuth -} from './lib/pool'; -import Serializer from './lib/Serializer'; -import Helpers from './lib/Helpers'; -import * as RequestParams from './api/requestParams'; -import * as errors from './lib/errors'; - -declare type callbackFn = (err: ApiError, result: ApiResponse) => void; - -// Extend API -interface ClientExtendsCallbackOptions { - ConfigurationError: errors.ConfigurationError, - makeRequest(params: TransportRequestParams, options?: TransportRequestOptions): Promise | void; - result: { - body: null, - statusCode: null, - headers: null, - warnings: null - } -} -declare type extendsCallback = (options: ClientExtendsCallbackOptions) => any; -interface ClientExtends { - (method: string, fn: extendsCallback): void; - (method: string, opts: { force: boolean }, fn: extendsCallback): void; -} -// /Extend API - -interface NodeOptions { - url: URL; - id?: string; - agent?: AgentOptions; - ssl?: TlsConnectionOptions; - headers?: Record; - roles?: { - master: boolean; - data: boolean; - ingest: boolean; - ml: boolean; - } -} - -interface ClientOptions { - node?: string | string[] | NodeOptions | NodeOptions[]; - nodes?: string | string[] | NodeOptions | NodeOptions[]; - Connection?: typeof Connection; - ConnectionPool?: typeof ConnectionPool; - Transport?: typeof Transport; - Serializer?: typeof Serializer; - maxRetries?: number; - requestTimeout?: number; - pingTimeout?: number; - sniffInterval?: number | boolean; - sniffOnStart?: boolean; - sniffEndpoint?: string; - sniffOnConnectionFault?: boolean; - resurrectStrategy?: 'ping' | 'optimistic' | 'none'; - suggestCompression?: boolean; - compression?: 'gzip'; - ssl?: TlsConnectionOptions; - agent?: AgentOptions | agentFn; - nodeFilter?: nodeFilterFn; - nodeSelector?: nodeSelectorFn | string; - headers?: Record; - opaqueIdPrefix?: string; - generateRequestId?: generateRequestIdFn; - name?: string; - auth?: BasicAuth | ApiKeyAuth; - cloud?: { - id: string; - // TODO: remove username and password here in 8 - username?: string; - password?: string; - } -} - -declare class Client extends EventEmitter { - constructor(opts?: ClientOptions); - connectionPool: ConnectionPool; - transport: Transport; - serializer: Serializer; - extend: ClientExtends; - helpers: Helpers; - child(opts?: ClientOptions): Client; - close(callback?: Function): Promise | void; - /* GENERATED */ - async_search: { - delete, TContext = unknown>(params?: RequestParams.AsyncSearchDelete, options?: TransportRequestOptions): TransportRequestPromise> - delete, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete, TContext = unknown>(params: RequestParams.AsyncSearchDelete, callback: callbackFn): TransportRequestCallback - delete, TContext = unknown>(params: RequestParams.AsyncSearchDelete, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params?: RequestParams.AsyncSearchGet, 
options?: TransportRequestOptions): TransportRequestPromise> - get, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params: RequestParams.AsyncSearchGet, callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params: RequestParams.AsyncSearchGet, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - submit, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.AsyncSearchSubmit, options?: TransportRequestOptions): TransportRequestPromise> - submit, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - submit, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.AsyncSearchSubmit, callback: callbackFn): TransportRequestCallback - submit, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.AsyncSearchSubmit, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - asyncSearch: { - delete, TContext = unknown>(params?: RequestParams.AsyncSearchDelete, options?: TransportRequestOptions): TransportRequestPromise> - delete, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete, TContext = unknown>(params: RequestParams.AsyncSearchDelete, callback: callbackFn): TransportRequestCallback - delete, TContext = unknown>(params: RequestParams.AsyncSearchDelete, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params?: RequestParams.AsyncSearchGet, options?: TransportRequestOptions): TransportRequestPromise> - get, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params: RequestParams.AsyncSearchGet, callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params: RequestParams.AsyncSearchGet, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - submit, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.AsyncSearchSubmit, options?: TransportRequestOptions): TransportRequestPromise> - submit, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - submit, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.AsyncSearchSubmit, callback: callbackFn): TransportRequestCallback - submit, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.AsyncSearchSubmit, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - autoscaling: { - delete_autoscaling_policy, TContext = unknown>(params?: RequestParams.AutoscalingDeleteAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - delete_autoscaling_policy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete_autoscaling_policy, TContext = unknown>(params: RequestParams.AutoscalingDeleteAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - delete_autoscaling_policy, TContext = unknown>(params: RequestParams.AutoscalingDeleteAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteAutoscalingPolicy, TContext = unknown>(params?: RequestParams.AutoscalingDeleteAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - deleteAutoscalingPolicy, TContext = unknown>(callback: callbackFn): 
TransportRequestCallback - deleteAutoscalingPolicy, TContext = unknown>(params: RequestParams.AutoscalingDeleteAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - deleteAutoscalingPolicy, TContext = unknown>(params: RequestParams.AutoscalingDeleteAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_autoscaling_decision, TContext = unknown>(params?: RequestParams.AutoscalingGetAutoscalingDecision, options?: TransportRequestOptions): TransportRequestPromise> - get_autoscaling_decision, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_autoscaling_decision, TContext = unknown>(params: RequestParams.AutoscalingGetAutoscalingDecision, callback: callbackFn): TransportRequestCallback - get_autoscaling_decision, TContext = unknown>(params: RequestParams.AutoscalingGetAutoscalingDecision, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAutoscalingDecision, TContext = unknown>(params?: RequestParams.AutoscalingGetAutoscalingDecision, options?: TransportRequestOptions): TransportRequestPromise> - getAutoscalingDecision, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getAutoscalingDecision, TContext = unknown>(params: RequestParams.AutoscalingGetAutoscalingDecision, callback: callbackFn): TransportRequestCallback - getAutoscalingDecision, TContext = unknown>(params: RequestParams.AutoscalingGetAutoscalingDecision, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_autoscaling_policy, TContext = unknown>(params?: RequestParams.AutoscalingGetAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - get_autoscaling_policy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_autoscaling_policy, TContext = unknown>(params: RequestParams.AutoscalingGetAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - get_autoscaling_policy, TContext = unknown>(params: RequestParams.AutoscalingGetAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAutoscalingPolicy, TContext = unknown>(params?: RequestParams.AutoscalingGetAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - getAutoscalingPolicy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getAutoscalingPolicy, TContext = unknown>(params: RequestParams.AutoscalingGetAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - getAutoscalingPolicy, TContext = unknown>(params: RequestParams.AutoscalingGetAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_autoscaling_policy, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.AutoscalingPutAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - put_autoscaling_policy, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_autoscaling_policy, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.AutoscalingPutAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - put_autoscaling_policy, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.AutoscalingPutAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putAutoscalingPolicy, TRequestBody extends RequestBody = 
Record, TContext = unknown>(params?: RequestParams.AutoscalingPutAutoscalingPolicy, options?: TransportRequestOptions): TransportRequestPromise> - putAutoscalingPolicy, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putAutoscalingPolicy, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.AutoscalingPutAutoscalingPolicy, callback: callbackFn): TransportRequestCallback - putAutoscalingPolicy, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.AutoscalingPutAutoscalingPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - bulk, TRequestBody extends RequestNDBody = Record[], TContext = unknown>(params?: RequestParams.Bulk, options?: TransportRequestOptions): TransportRequestPromise> - bulk, TRequestBody extends RequestNDBody = Record[], TContext = unknown>(callback: callbackFn): TransportRequestCallback - bulk, TRequestBody extends RequestNDBody = Record[], TContext = unknown>(params: RequestParams.Bulk, callback: callbackFn): TransportRequestCallback - bulk, TRequestBody extends RequestNDBody = Record[], TContext = unknown>(params: RequestParams.Bulk, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - cat: { - aliases, TContext = unknown>(params?: RequestParams.CatAliases, options?: TransportRequestOptions): TransportRequestPromise> - aliases, TContext = unknown>(callback: callbackFn): TransportRequestCallback - aliases, TContext = unknown>(params: RequestParams.CatAliases, callback: callbackFn): TransportRequestCallback - aliases, TContext = unknown>(params: RequestParams.CatAliases, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - allocation, TContext = unknown>(params?: RequestParams.CatAllocation, options?: TransportRequestOptions): TransportRequestPromise> - allocation, TContext = unknown>(callback: callbackFn): TransportRequestCallback - allocation, TContext = unknown>(params: RequestParams.CatAllocation, callback: callbackFn): TransportRequestCallback - allocation, TContext = unknown>(params: RequestParams.CatAllocation, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - count, TContext = unknown>(params?: RequestParams.CatCount, options?: TransportRequestOptions): TransportRequestPromise> - count, TContext = unknown>(callback: callbackFn): TransportRequestCallback - count, TContext = unknown>(params: RequestParams.CatCount, callback: callbackFn): TransportRequestCallback - count, TContext = unknown>(params: RequestParams.CatCount, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - fielddata, TContext = unknown>(params?: RequestParams.CatFielddata, options?: TransportRequestOptions): TransportRequestPromise> - fielddata, TContext = unknown>(callback: callbackFn): TransportRequestCallback - fielddata, TContext = unknown>(params: RequestParams.CatFielddata, callback: callbackFn): TransportRequestCallback - fielddata, TContext = unknown>(params: RequestParams.CatFielddata, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - health, TContext = unknown>(params?: RequestParams.CatHealth, options?: TransportRequestOptions): TransportRequestPromise> - health, TContext = unknown>(callback: callbackFn): TransportRequestCallback - health, TContext = unknown>(params: RequestParams.CatHealth, callback: callbackFn): TransportRequestCallback - health, TContext = 
unknown>(params: RequestParams.CatHealth, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    help<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.CatHelp, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    help<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    help<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.CatHelp, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    help<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.CatHelp, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
[... the same four overloads (promise; bare callback; params + callback; params + options + callback) are removed for cat.indices, cat.master, cat.ml_data_frame_analytics / mlDataFrameAnalytics, cat.ml_datafeeds / mlDatafeeds, cat.ml_jobs / mlJobs, cat.ml_trained_models / mlTrainedModels, cat.nodeattrs, cat.nodes, cat.pending_tasks / pendingTasks, cat.plugins, cat.recovery, cat.repositories, cat.segments, cat.shards, cat.snapshots, cat.tasks, cat.templates, cat.thread_pool / threadPool and cat.transforms ...]
-  }
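For reference while reviewing the deletions above: each removed method carried one promise overload and three callback overloads, and the callback form returned an abortable handle. A minimal sketch of both calling styles, assuming a 7.x @elastic/elasticsearch client and a cluster at http://localhost:9200 (both are illustrative, not part of this diff):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function promiseStyle () {
  // First overload: params and options are both optional and the call
  // returns a TransportRequestPromise that resolves to an ApiResponse.
  const { body } = await client.cat.indices({ format: 'json' })
  console.log(body)
}

function callbackStyle () {
  // Callback overloads: the return value is a TransportRequestCallback
  // handle that can cancel the in-flight request.
  const request = client.cat.indices({ format: 'json' }, (err, result) => {
    if (err) throw err
    console.log(result.body)
  })
  // request.abort() would cancel the request; shown only to illustrate the handle.
}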
-  ccr: {
-    delete_auto_follow_pattern<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.CcrDeleteAutoFollowPattern, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_auto_follow_pattern<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_auto_follow_pattern<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.CcrDeleteAutoFollowPattern, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_auto_follow_pattern<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.CcrDeleteAutoFollowPattern, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    follow<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.CcrFollow<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
[... the deleteAutoFollowPattern alias and the remaining ccr methods (follow_info / followInfo, follow_stats / followStats, forget_follower / forgetFollower, get_auto_follow_pattern / getAutoFollowPattern, pause_auto_follow_pattern / pauseAutoFollowPattern, pause_follow / pauseFollow, put_auto_follow_pattern / putAutoFollowPattern, resume_auto_follow_pattern / resumeAutoFollowPattern, resume_follow / resumeFollow, stats, unfollow) lose the same overload sets; the body-accepting methods (follow, forget_follower, put_auto_follow_pattern, resume_follow) also carry the TRequestBody parameter shown above ...]
-  }
-  clear_scroll<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.ClearScroll<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
[... plus its three callback overloads, all duplicated under the clearScroll alias ...]
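The TRequestBody parameter visible in the body-accepting signatures above is what let callers type a request body end to end, since the corresponding RequestParams interfaces are generic over it. A hedged sketch, again assuming a 7.x client; the FollowBody interface and the index names are invented for illustration:

import { Client, RequestParams } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Invented body shape; the field names follow the CCR follow API.
interface FollowBody {
  remote_cluster: string
  leader_index: string
}

async function startFollower () {
  const params: RequestParams.CcrFollow<FollowBody> = {
    index: 'follower-index',
    body: { remote_cluster: 'remote', leader_index: 'leader-index' }
  }
  // TResponse and TRequestBody bind to the promise overload being removed above.
  const response = await client.ccr.follow<Record<string, any>, FollowBody>(params)
  console.log(response.body)
}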
-  cluster: {
-    allocation_explain<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.ClusterAllocationExplain<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    allocation_explain<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    allocation_explain<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.ClusterAllocationExplain<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    allocation_explain<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.ClusterAllocationExplain<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
[... the allocationExplain alias and the remaining cluster methods (delete_component_template / deleteComponentTemplate, delete_voting_config_exclusions / deleteVotingConfigExclusions, exists_component_template / existsComponentTemplate, get_component_template / getComponentTemplate, get_settings / getSettings, health, pending_tasks / pendingTasks, post_voting_config_exclusions / postVotingConfigExclusions, put_component_template / putComponentTemplate, put_settings / putSettings, remote_info / remoteInfo, reroute, state, stats) lose the same overload sets ...]
-  }
-  count<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.Count<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-  create<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.Create<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
[... each with its three callback overloads ...]
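Note that every snake_case method in this file is declared a second time under a camelCase alias, which is why each signature in the removed block appears twice. At runtime both spellings invoke the same endpoint in the 7.x client; that equivalence is our reading of the client's convention, not something stated in this diff. A small sketch:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function toggleAllocation () {
  // Equivalent calls; only the spelling of the method name differs.
  await client.cluster.put_settings({
    body: { transient: { 'cluster.routing.allocation.enable': 'all' } }
  })
  await client.cluster.putSettings({
    body: { transient: { 'cluster.routing.allocation.enable': 'all' } }
  })
}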
-  data_frame_transform_deprecated: {
-    delete_transform<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.DataFrameTransformDeprecatedDeleteTransform, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_transform<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_transform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.DataFrameTransformDeprecatedDeleteTransform, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_transform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.DataFrameTransformDeprecatedDeleteTransform, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
[... the deleteTransform alias and the remaining methods (get_transform / getTransform, get_transform_stats / getTransformStats, preview_transform / previewTransform, put_transform / putTransform, start_transform / startTransform, stop_transform / stopTransform, update_transform / updateTransform) lose the same overload sets; preview_transform, put_transform and update_transform also take TRequestBody ...]
-  }
-  dataFrameTransformDeprecated: {
[... an alias namespace repeating every declaration of data_frame_transform_deprecated above ...]
-  }
-  delete<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.Delete, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-  delete_by_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.DeleteByQuery<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
[... each with its three callback overloads; delete_by_query is also aliased as deleteByQuery ...]
unknown>(params: RequestParams.DeleteByQueryRethrottle, callback: callbackFn): TransportRequestCallback - delete_by_query_rethrottle, TContext = unknown>(params: RequestParams.DeleteByQueryRethrottle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteByQueryRethrottle, TContext = unknown>(params?: RequestParams.DeleteByQueryRethrottle, options?: TransportRequestOptions): TransportRequestPromise> - deleteByQueryRethrottle, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deleteByQueryRethrottle, TContext = unknown>(params: RequestParams.DeleteByQueryRethrottle, callback: callbackFn): TransportRequestCallback - deleteByQueryRethrottle, TContext = unknown>(params: RequestParams.DeleteByQueryRethrottle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_script, TContext = unknown>(params?: RequestParams.DeleteScript, options?: TransportRequestOptions): TransportRequestPromise> - delete_script, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete_script, TContext = unknown>(params: RequestParams.DeleteScript, callback: callbackFn): TransportRequestCallback - delete_script, TContext = unknown>(params: RequestParams.DeleteScript, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteScript, TContext = unknown>(params?: RequestParams.DeleteScript, options?: TransportRequestOptions): TransportRequestPromise> - deleteScript, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deleteScript, TContext = unknown>(params: RequestParams.DeleteScript, callback: callbackFn): TransportRequestCallback - deleteScript, TContext = unknown>(params: RequestParams.DeleteScript, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - enrich: { - delete_policy, TContext = unknown>(params?: RequestParams.EnrichDeletePolicy, options?: TransportRequestOptions): TransportRequestPromise> - delete_policy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete_policy, TContext = unknown>(params: RequestParams.EnrichDeletePolicy, callback: callbackFn): TransportRequestCallback - delete_policy, TContext = unknown>(params: RequestParams.EnrichDeletePolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deletePolicy, TContext = unknown>(params?: RequestParams.EnrichDeletePolicy, options?: TransportRequestOptions): TransportRequestPromise> - deletePolicy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deletePolicy, TContext = unknown>(params: RequestParams.EnrichDeletePolicy, callback: callbackFn): TransportRequestCallback - deletePolicy, TContext = unknown>(params: RequestParams.EnrichDeletePolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - execute_policy, TContext = unknown>(params?: RequestParams.EnrichExecutePolicy, options?: TransportRequestOptions): TransportRequestPromise> - execute_policy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - execute_policy, TContext = unknown>(params: RequestParams.EnrichExecutePolicy, callback: callbackFn): TransportRequestCallback - execute_policy, TContext = unknown>(params: RequestParams.EnrichExecutePolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - executePolicy, TContext = unknown>(params?: RequestParams.EnrichExecutePolicy, options?: TransportRequestOptions): TransportRequestPromise> - 
executePolicy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - executePolicy, TContext = unknown>(params: RequestParams.EnrichExecutePolicy, callback: callbackFn): TransportRequestCallback - executePolicy, TContext = unknown>(params: RequestParams.EnrichExecutePolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_policy, TContext = unknown>(params?: RequestParams.EnrichGetPolicy, options?: TransportRequestOptions): TransportRequestPromise> - get_policy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_policy, TContext = unknown>(params: RequestParams.EnrichGetPolicy, callback: callbackFn): TransportRequestCallback - get_policy, TContext = unknown>(params: RequestParams.EnrichGetPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getPolicy, TContext = unknown>(params?: RequestParams.EnrichGetPolicy, options?: TransportRequestOptions): TransportRequestPromise> - getPolicy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getPolicy, TContext = unknown>(params: RequestParams.EnrichGetPolicy, callback: callbackFn): TransportRequestCallback - getPolicy, TContext = unknown>(params: RequestParams.EnrichGetPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_policy, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.EnrichPutPolicy, options?: TransportRequestOptions): TransportRequestPromise> - put_policy, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_policy, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.EnrichPutPolicy, callback: callbackFn): TransportRequestCallback - put_policy, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.EnrichPutPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putPolicy, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.EnrichPutPolicy, options?: TransportRequestOptions): TransportRequestPromise> - putPolicy, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putPolicy, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.EnrichPutPolicy, callback: callbackFn): TransportRequestCallback - putPolicy, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.EnrichPutPolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stats, TContext = unknown>(params?: RequestParams.EnrichStats, options?: TransportRequestOptions): TransportRequestPromise> - stats, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stats, TContext = unknown>(params: RequestParams.EnrichStats, callback: callbackFn): TransportRequestCallback - stats, TContext = unknown>(params: RequestParams.EnrichStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - eql: { - search, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.EqlSearch, options?: TransportRequestOptions): TransportRequestPromise> - search, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - search, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.EqlSearch, 
callback: callbackFn): TransportRequestCallback - search, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.EqlSearch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - exists, TContext = unknown>(params?: RequestParams.Exists, options?: TransportRequestOptions): TransportRequestPromise> - exists, TContext = unknown>(callback: callbackFn): TransportRequestCallback - exists, TContext = unknown>(params: RequestParams.Exists, callback: callbackFn): TransportRequestCallback - exists, TContext = unknown>(params: RequestParams.Exists, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - exists_source, TContext = unknown>(params?: RequestParams.ExistsSource, options?: TransportRequestOptions): TransportRequestPromise> - exists_source, TContext = unknown>(callback: callbackFn): TransportRequestCallback - exists_source, TContext = unknown>(params: RequestParams.ExistsSource, callback: callbackFn): TransportRequestCallback - exists_source, TContext = unknown>(params: RequestParams.ExistsSource, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsSource, TContext = unknown>(params?: RequestParams.ExistsSource, options?: TransportRequestOptions): TransportRequestPromise> - existsSource, TContext = unknown>(callback: callbackFn): TransportRequestCallback - existsSource, TContext = unknown>(params: RequestParams.ExistsSource, callback: callbackFn): TransportRequestCallback - existsSource, TContext = unknown>(params: RequestParams.ExistsSource, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - explain, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.Explain, options?: TransportRequestOptions): TransportRequestPromise> - explain, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - explain, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.Explain, callback: callbackFn): TransportRequestCallback - explain, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.Explain, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - field_caps, TContext = unknown>(params?: RequestParams.FieldCaps, options?: TransportRequestOptions): TransportRequestPromise> - field_caps, TContext = unknown>(callback: callbackFn): TransportRequestCallback - field_caps, TContext = unknown>(params: RequestParams.FieldCaps, callback: callbackFn): TransportRequestCallback - field_caps, TContext = unknown>(params: RequestParams.FieldCaps, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - fieldCaps, TContext = unknown>(params?: RequestParams.FieldCaps, options?: TransportRequestOptions): TransportRequestPromise> - fieldCaps, TContext = unknown>(callback: callbackFn): TransportRequestCallback - fieldCaps, TContext = unknown>(params: RequestParams.FieldCaps, callback: callbackFn): TransportRequestCallback - fieldCaps, TContext = unknown>(params: RequestParams.FieldCaps, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params?: RequestParams.Get, options?: TransportRequestOptions): TransportRequestPromise> - get, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params: RequestParams.Get, callback: callbackFn): TransportRequestCallback - get, 
TContext = unknown>(params: RequestParams.Get, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_script, TContext = unknown>(params?: RequestParams.GetScript, options?: TransportRequestOptions): TransportRequestPromise> - get_script, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_script, TContext = unknown>(params: RequestParams.GetScript, callback: callbackFn): TransportRequestCallback - get_script, TContext = unknown>(params: RequestParams.GetScript, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getScript, TContext = unknown>(params?: RequestParams.GetScript, options?: TransportRequestOptions): TransportRequestPromise> - getScript, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getScript, TContext = unknown>(params: RequestParams.GetScript, callback: callbackFn): TransportRequestCallback - getScript, TContext = unknown>(params: RequestParams.GetScript, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_script_context, TContext = unknown>(params?: RequestParams.GetScriptContext, options?: TransportRequestOptions): TransportRequestPromise> - get_script_context, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_script_context, TContext = unknown>(params: RequestParams.GetScriptContext, callback: callbackFn): TransportRequestCallback - get_script_context, TContext = unknown>(params: RequestParams.GetScriptContext, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getScriptContext, TContext = unknown>(params?: RequestParams.GetScriptContext, options?: TransportRequestOptions): TransportRequestPromise> - getScriptContext, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getScriptContext, TContext = unknown>(params: RequestParams.GetScriptContext, callback: callbackFn): TransportRequestCallback - getScriptContext, TContext = unknown>(params: RequestParams.GetScriptContext, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_script_languages, TContext = unknown>(params?: RequestParams.GetScriptLanguages, options?: TransportRequestOptions): TransportRequestPromise> - get_script_languages, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_script_languages, TContext = unknown>(params: RequestParams.GetScriptLanguages, callback: callbackFn): TransportRequestCallback - get_script_languages, TContext = unknown>(params: RequestParams.GetScriptLanguages, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getScriptLanguages, TContext = unknown>(params?: RequestParams.GetScriptLanguages, options?: TransportRequestOptions): TransportRequestPromise> - getScriptLanguages, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getScriptLanguages, TContext = unknown>(params: RequestParams.GetScriptLanguages, callback: callbackFn): TransportRequestCallback - getScriptLanguages, TContext = unknown>(params: RequestParams.GetScriptLanguages, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_source, TContext = unknown>(params?: RequestParams.GetSource, options?: TransportRequestOptions): TransportRequestPromise> - get_source, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_source, TContext = unknown>(params: RequestParams.GetSource, callback: callbackFn): TransportRequestCallback - get_source, TContext = 
unknown>(params: RequestParams.GetSource, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getSource, TContext = unknown>(params?: RequestParams.GetSource, options?: TransportRequestOptions): TransportRequestPromise> - getSource, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getSource, TContext = unknown>(params: RequestParams.GetSource, callback: callbackFn): TransportRequestCallback - getSource, TContext = unknown>(params: RequestParams.GetSource, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - graph: { - explore, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.GraphExplore, options?: TransportRequestOptions): TransportRequestPromise> - explore, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - explore, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.GraphExplore, callback: callbackFn): TransportRequestCallback - explore, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.GraphExplore, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - ilm: { - delete_lifecycle, TContext = unknown>(params?: RequestParams.IlmDeleteLifecycle, options?: TransportRequestOptions): TransportRequestPromise> - delete_lifecycle, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete_lifecycle, TContext = unknown>(params: RequestParams.IlmDeleteLifecycle, callback: callbackFn): TransportRequestCallback - delete_lifecycle, TContext = unknown>(params: RequestParams.IlmDeleteLifecycle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteLifecycle, TContext = unknown>(params?: RequestParams.IlmDeleteLifecycle, options?: TransportRequestOptions): TransportRequestPromise> - deleteLifecycle, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deleteLifecycle, TContext = unknown>(params: RequestParams.IlmDeleteLifecycle, callback: callbackFn): TransportRequestCallback - deleteLifecycle, TContext = unknown>(params: RequestParams.IlmDeleteLifecycle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - explain_lifecycle, TContext = unknown>(params?: RequestParams.IlmExplainLifecycle, options?: TransportRequestOptions): TransportRequestPromise> - explain_lifecycle, TContext = unknown>(callback: callbackFn): TransportRequestCallback - explain_lifecycle, TContext = unknown>(params: RequestParams.IlmExplainLifecycle, callback: callbackFn): TransportRequestCallback - explain_lifecycle, TContext = unknown>(params: RequestParams.IlmExplainLifecycle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - explainLifecycle, TContext = unknown>(params?: RequestParams.IlmExplainLifecycle, options?: TransportRequestOptions): TransportRequestPromise> - explainLifecycle, TContext = unknown>(callback: callbackFn): TransportRequestCallback - explainLifecycle, TContext = unknown>(params: RequestParams.IlmExplainLifecycle, callback: callbackFn): TransportRequestCallback - explainLifecycle, TContext = unknown>(params: RequestParams.IlmExplainLifecycle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_lifecycle, TContext = unknown>(params?: RequestParams.IlmGetLifecycle, options?: TransportRequestOptions): TransportRequestPromise> - get_lifecycle, TContext = 
unknown>(callback: callbackFn): TransportRequestCallback - get_lifecycle, TContext = unknown>(params: RequestParams.IlmGetLifecycle, callback: callbackFn): TransportRequestCallback - get_lifecycle, TContext = unknown>(params: RequestParams.IlmGetLifecycle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getLifecycle, TContext = unknown>(params?: RequestParams.IlmGetLifecycle, options?: TransportRequestOptions): TransportRequestPromise> - getLifecycle, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getLifecycle, TContext = unknown>(params: RequestParams.IlmGetLifecycle, callback: callbackFn): TransportRequestCallback - getLifecycle, TContext = unknown>(params: RequestParams.IlmGetLifecycle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_status, TContext = unknown>(params?: RequestParams.IlmGetStatus, options?: TransportRequestOptions): TransportRequestPromise> - get_status, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_status, TContext = unknown>(params: RequestParams.IlmGetStatus, callback: callbackFn): TransportRequestCallback - get_status, TContext = unknown>(params: RequestParams.IlmGetStatus, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getStatus, TContext = unknown>(params?: RequestParams.IlmGetStatus, options?: TransportRequestOptions): TransportRequestPromise> - getStatus, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getStatus, TContext = unknown>(params: RequestParams.IlmGetStatus, callback: callbackFn): TransportRequestCallback - getStatus, TContext = unknown>(params: RequestParams.IlmGetStatus, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - move_to_step, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IlmMoveToStep, options?: TransportRequestOptions): TransportRequestPromise> - move_to_step, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - move_to_step, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IlmMoveToStep, callback: callbackFn): TransportRequestCallback - move_to_step, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IlmMoveToStep, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - moveToStep, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IlmMoveToStep, options?: TransportRequestOptions): TransportRequestPromise> - moveToStep, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - moveToStep, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IlmMoveToStep, callback: callbackFn): TransportRequestCallback - moveToStep, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IlmMoveToStep, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_lifecycle, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IlmPutLifecycle, options?: TransportRequestOptions): TransportRequestPromise> - put_lifecycle, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_lifecycle, TRequestBody extends RequestBody = Record, TContext = unknown>(params: 
RequestParams.IlmPutLifecycle, callback: callbackFn): TransportRequestCallback - put_lifecycle, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IlmPutLifecycle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putLifecycle, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IlmPutLifecycle, options?: TransportRequestOptions): TransportRequestPromise> - putLifecycle, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putLifecycle, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IlmPutLifecycle, callback: callbackFn): TransportRequestCallback - putLifecycle, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IlmPutLifecycle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - remove_policy, TContext = unknown>(params?: RequestParams.IlmRemovePolicy, options?: TransportRequestOptions): TransportRequestPromise> - remove_policy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - remove_policy, TContext = unknown>(params: RequestParams.IlmRemovePolicy, callback: callbackFn): TransportRequestCallback - remove_policy, TContext = unknown>(params: RequestParams.IlmRemovePolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - removePolicy, TContext = unknown>(params?: RequestParams.IlmRemovePolicy, options?: TransportRequestOptions): TransportRequestPromise> - removePolicy, TContext = unknown>(callback: callbackFn): TransportRequestCallback - removePolicy, TContext = unknown>(params: RequestParams.IlmRemovePolicy, callback: callbackFn): TransportRequestCallback - removePolicy, TContext = unknown>(params: RequestParams.IlmRemovePolicy, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - retry, TContext = unknown>(params?: RequestParams.IlmRetry, options?: TransportRequestOptions): TransportRequestPromise> - retry, TContext = unknown>(callback: callbackFn): TransportRequestCallback - retry, TContext = unknown>(params: RequestParams.IlmRetry, callback: callbackFn): TransportRequestCallback - retry, TContext = unknown>(params: RequestParams.IlmRetry, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - start, TContext = unknown>(params?: RequestParams.IlmStart, options?: TransportRequestOptions): TransportRequestPromise> - start, TContext = unknown>(callback: callbackFn): TransportRequestCallback - start, TContext = unknown>(params: RequestParams.IlmStart, callback: callbackFn): TransportRequestCallback - start, TContext = unknown>(params: RequestParams.IlmStart, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stop, TContext = unknown>(params?: RequestParams.IlmStop, options?: TransportRequestOptions): TransportRequestPromise> - stop, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stop, TContext = unknown>(params: RequestParams.IlmStop, callback: callbackFn): TransportRequestCallback - stop, TContext = unknown>(params: RequestParams.IlmStop, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - index, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.Index, options?: TransportRequestOptions): TransportRequestPromise> - index, TRequestBody extends RequestBody = Record, TContext = 
unknown>(callback: callbackFn): TransportRequestCallback - index, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.Index, callback: callbackFn): TransportRequestCallback - index, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.Index, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - indices: { - analyze, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesAnalyze, options?: TransportRequestOptions): TransportRequestPromise> - analyze, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - analyze, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesAnalyze, callback: callbackFn): TransportRequestCallback - analyze, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesAnalyze, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clear_cache, TContext = unknown>(params?: RequestParams.IndicesClearCache, options?: TransportRequestOptions): TransportRequestPromise> - clear_cache, TContext = unknown>(callback: callbackFn): TransportRequestCallback - clear_cache, TContext = unknown>(params: RequestParams.IndicesClearCache, callback: callbackFn): TransportRequestCallback - clear_cache, TContext = unknown>(params: RequestParams.IndicesClearCache, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clearCache, TContext = unknown>(params?: RequestParams.IndicesClearCache, options?: TransportRequestOptions): TransportRequestPromise> - clearCache, TContext = unknown>(callback: callbackFn): TransportRequestCallback - clearCache, TContext = unknown>(params: RequestParams.IndicesClearCache, callback: callbackFn): TransportRequestCallback - clearCache, TContext = unknown>(params: RequestParams.IndicesClearCache, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clone, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesClone, options?: TransportRequestOptions): TransportRequestPromise> - clone, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - clone, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesClone, callback: callbackFn): TransportRequestCallback - clone, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesClone, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - close, TContext = unknown>(params?: RequestParams.IndicesClose, options?: TransportRequestOptions): TransportRequestPromise> - close, TContext = unknown>(callback: callbackFn): TransportRequestCallback - close, TContext = unknown>(params: RequestParams.IndicesClose, callback: callbackFn): TransportRequestCallback - close, TContext = unknown>(params: RequestParams.IndicesClose, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - create, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesCreate, options?: TransportRequestOptions): TransportRequestPromise> - create, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - create, TRequestBody extends RequestBody = Record, TContext = unknown>(params: 
RequestParams.IndicesCreate, callback: callbackFn): TransportRequestCallback - create, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesCreate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - create_data_stream, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesCreateDataStream, options?: TransportRequestOptions): TransportRequestPromise> - create_data_stream, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - create_data_stream, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesCreateDataStream, callback: callbackFn): TransportRequestCallback - create_data_stream, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesCreateDataStream, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - createDataStream, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesCreateDataStream, options?: TransportRequestOptions): TransportRequestPromise> - createDataStream, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - createDataStream, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesCreateDataStream, callback: callbackFn): TransportRequestCallback - createDataStream, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesCreateDataStream, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete, TContext = unknown>(params?: RequestParams.IndicesDelete, options?: TransportRequestOptions): TransportRequestPromise> - delete, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete, TContext = unknown>(params: RequestParams.IndicesDelete, callback: callbackFn): TransportRequestCallback - delete, TContext = unknown>(params: RequestParams.IndicesDelete, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_alias, TContext = unknown>(params?: RequestParams.IndicesDeleteAlias, options?: TransportRequestOptions): TransportRequestPromise> - delete_alias, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete_alias, TContext = unknown>(params: RequestParams.IndicesDeleteAlias, callback: callbackFn): TransportRequestCallback - delete_alias, TContext = unknown>(params: RequestParams.IndicesDeleteAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteAlias, TContext = unknown>(params?: RequestParams.IndicesDeleteAlias, options?: TransportRequestOptions): TransportRequestPromise> - deleteAlias, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deleteAlias, TContext = unknown>(params: RequestParams.IndicesDeleteAlias, callback: callbackFn): TransportRequestCallback - deleteAlias, TContext = unknown>(params: RequestParams.IndicesDeleteAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_data_stream, TContext = unknown>(params?: RequestParams.IndicesDeleteDataStream, options?: TransportRequestOptions): TransportRequestPromise> - delete_data_stream, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete_data_stream, TContext = unknown>(params: RequestParams.IndicesDeleteDataStream, callback: callbackFn): 
TransportRequestCallback - delete_data_stream, TContext = unknown>(params: RequestParams.IndicesDeleteDataStream, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteDataStream, TContext = unknown>(params?: RequestParams.IndicesDeleteDataStream, options?: TransportRequestOptions): TransportRequestPromise> - deleteDataStream, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deleteDataStream, TContext = unknown>(params: RequestParams.IndicesDeleteDataStream, callback: callbackFn): TransportRequestCallback - deleteDataStream, TContext = unknown>(params: RequestParams.IndicesDeleteDataStream, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_index_template, TContext = unknown>(params?: RequestParams.IndicesDeleteIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise> - delete_index_template, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete_index_template, TContext = unknown>(params: RequestParams.IndicesDeleteIndexTemplate, callback: callbackFn): TransportRequestCallback - delete_index_template, TContext = unknown>(params: RequestParams.IndicesDeleteIndexTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteIndexTemplate, TContext = unknown>(params?: RequestParams.IndicesDeleteIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise> - deleteIndexTemplate, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deleteIndexTemplate, TContext = unknown>(params: RequestParams.IndicesDeleteIndexTemplate, callback: callbackFn): TransportRequestCallback - deleteIndexTemplate, TContext = unknown>(params: RequestParams.IndicesDeleteIndexTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_template, TContext = unknown>(params?: RequestParams.IndicesDeleteTemplate, options?: TransportRequestOptions): TransportRequestPromise> - delete_template, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete_template, TContext = unknown>(params: RequestParams.IndicesDeleteTemplate, callback: callbackFn): TransportRequestCallback - delete_template, TContext = unknown>(params: RequestParams.IndicesDeleteTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteTemplate, TContext = unknown>(params?: RequestParams.IndicesDeleteTemplate, options?: TransportRequestOptions): TransportRequestPromise> - deleteTemplate, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deleteTemplate, TContext = unknown>(params: RequestParams.IndicesDeleteTemplate, callback: callbackFn): TransportRequestCallback - deleteTemplate, TContext = unknown>(params: RequestParams.IndicesDeleteTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - exists, TContext = unknown>(params?: RequestParams.IndicesExists, options?: TransportRequestOptions): TransportRequestPromise> - exists, TContext = unknown>(callback: callbackFn): TransportRequestCallback - exists, TContext = unknown>(params: RequestParams.IndicesExists, callback: callbackFn): TransportRequestCallback - exists, TContext = unknown>(params: RequestParams.IndicesExists, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - exists_alias, TContext = unknown>(params?: RequestParams.IndicesExistsAlias, options?: TransportRequestOptions): TransportRequestPromise> - exists_alias, 
TContext = unknown>(callback: callbackFn): TransportRequestCallback - exists_alias, TContext = unknown>(params: RequestParams.IndicesExistsAlias, callback: callbackFn): TransportRequestCallback - exists_alias, TContext = unknown>(params: RequestParams.IndicesExistsAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsAlias, TContext = unknown>(params?: RequestParams.IndicesExistsAlias, options?: TransportRequestOptions): TransportRequestPromise> - existsAlias, TContext = unknown>(callback: callbackFn): TransportRequestCallback - existsAlias, TContext = unknown>(params: RequestParams.IndicesExistsAlias, callback: callbackFn): TransportRequestCallback - existsAlias, TContext = unknown>(params: RequestParams.IndicesExistsAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - exists_index_template, TContext = unknown>(params?: RequestParams.IndicesExistsIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise> - exists_index_template, TContext = unknown>(callback: callbackFn): TransportRequestCallback - exists_index_template, TContext = unknown>(params: RequestParams.IndicesExistsIndexTemplate, callback: callbackFn): TransportRequestCallback - exists_index_template, TContext = unknown>(params: RequestParams.IndicesExistsIndexTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsIndexTemplate, TContext = unknown>(params?: RequestParams.IndicesExistsIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise> - existsIndexTemplate, TContext = unknown>(callback: callbackFn): TransportRequestCallback - existsIndexTemplate, TContext = unknown>(params: RequestParams.IndicesExistsIndexTemplate, callback: callbackFn): TransportRequestCallback - existsIndexTemplate, TContext = unknown>(params: RequestParams.IndicesExistsIndexTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - exists_template, TContext = unknown>(params?: RequestParams.IndicesExistsTemplate, options?: TransportRequestOptions): TransportRequestPromise> - exists_template, TContext = unknown>(callback: callbackFn): TransportRequestCallback - exists_template, TContext = unknown>(params: RequestParams.IndicesExistsTemplate, callback: callbackFn): TransportRequestCallback - exists_template, TContext = unknown>(params: RequestParams.IndicesExistsTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - existsTemplate, TContext = unknown>(params?: RequestParams.IndicesExistsTemplate, options?: TransportRequestOptions): TransportRequestPromise> - existsTemplate, TContext = unknown>(callback: callbackFn): TransportRequestCallback - existsTemplate, TContext = unknown>(params: RequestParams.IndicesExistsTemplate, callback: callbackFn): TransportRequestCallback - existsTemplate, TContext = unknown>(params: RequestParams.IndicesExistsTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - exists_type, TContext = unknown>(params?: RequestParams.IndicesExistsType, options?: TransportRequestOptions): TransportRequestPromise> - exists_type, TContext = unknown>(callback: callbackFn): TransportRequestCallback - exists_type, TContext = unknown>(params: RequestParams.IndicesExistsType, callback: callbackFn): TransportRequestCallback - exists_type, TContext = unknown>(params: RequestParams.IndicesExistsType, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - 
existsType, TContext = unknown>(params?: RequestParams.IndicesExistsType, options?: TransportRequestOptions): TransportRequestPromise> - existsType, TContext = unknown>(callback: callbackFn): TransportRequestCallback - existsType, TContext = unknown>(params: RequestParams.IndicesExistsType, callback: callbackFn): TransportRequestCallback - existsType, TContext = unknown>(params: RequestParams.IndicesExistsType, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - flush, TContext = unknown>(params?: RequestParams.IndicesFlush, options?: TransportRequestOptions): TransportRequestPromise> - flush, TContext = unknown>(callback: callbackFn): TransportRequestCallback - flush, TContext = unknown>(params: RequestParams.IndicesFlush, callback: callbackFn): TransportRequestCallback - flush, TContext = unknown>(params: RequestParams.IndicesFlush, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - forcemerge, TContext = unknown>(params?: RequestParams.IndicesForcemerge, options?: TransportRequestOptions): TransportRequestPromise> - forcemerge, TContext = unknown>(callback: callbackFn): TransportRequestCallback - forcemerge, TContext = unknown>(params: RequestParams.IndicesForcemerge, callback: callbackFn): TransportRequestCallback - forcemerge, TContext = unknown>(params: RequestParams.IndicesForcemerge, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - freeze, TContext = unknown>(params?: RequestParams.IndicesFreeze, options?: TransportRequestOptions): TransportRequestPromise> - freeze, TContext = unknown>(callback: callbackFn): TransportRequestCallback - freeze, TContext = unknown>(params: RequestParams.IndicesFreeze, callback: callbackFn): TransportRequestCallback - freeze, TContext = unknown>(params: RequestParams.IndicesFreeze, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params?: RequestParams.IndicesGet, options?: TransportRequestOptions): TransportRequestPromise> - get, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params: RequestParams.IndicesGet, callback: callbackFn): TransportRequestCallback - get, TContext = unknown>(params: RequestParams.IndicesGet, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_alias, TContext = unknown>(params?: RequestParams.IndicesGetAlias, options?: TransportRequestOptions): TransportRequestPromise> - get_alias, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_alias, TContext = unknown>(params: RequestParams.IndicesGetAlias, callback: callbackFn): TransportRequestCallback - get_alias, TContext = unknown>(params: RequestParams.IndicesGetAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getAlias, TContext = unknown>(params?: RequestParams.IndicesGetAlias, options?: TransportRequestOptions): TransportRequestPromise> - getAlias, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getAlias, TContext = unknown>(params: RequestParams.IndicesGetAlias, callback: callbackFn): TransportRequestCallback - getAlias, TContext = unknown>(params: RequestParams.IndicesGetAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_data_stream, TContext = unknown>(params?: RequestParams.IndicesGetDataStream, options?: TransportRequestOptions): TransportRequestPromise> - get_data_stream, TContext = unknown>(callback: 
callbackFn): TransportRequestCallback - get_data_stream, TContext = unknown>(params: RequestParams.IndicesGetDataStream, callback: callbackFn): TransportRequestCallback - get_data_stream, TContext = unknown>(params: RequestParams.IndicesGetDataStream, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getDataStream, TContext = unknown>(params?: RequestParams.IndicesGetDataStream, options?: TransportRequestOptions): TransportRequestPromise> - getDataStream, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getDataStream, TContext = unknown>(params: RequestParams.IndicesGetDataStream, callback: callbackFn): TransportRequestCallback - getDataStream, TContext = unknown>(params: RequestParams.IndicesGetDataStream, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_field_mapping, TContext = unknown>(params?: RequestParams.IndicesGetFieldMapping, options?: TransportRequestOptions): TransportRequestPromise> - get_field_mapping, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_field_mapping, TContext = unknown>(params: RequestParams.IndicesGetFieldMapping, callback: callbackFn): TransportRequestCallback - get_field_mapping, TContext = unknown>(params: RequestParams.IndicesGetFieldMapping, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getFieldMapping, TContext = unknown>(params?: RequestParams.IndicesGetFieldMapping, options?: TransportRequestOptions): TransportRequestPromise> - getFieldMapping, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getFieldMapping, TContext = unknown>(params: RequestParams.IndicesGetFieldMapping, callback: callbackFn): TransportRequestCallback - getFieldMapping, TContext = unknown>(params: RequestParams.IndicesGetFieldMapping, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_index_template, TContext = unknown>(params?: RequestParams.IndicesGetIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise> - get_index_template, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_index_template, TContext = unknown>(params: RequestParams.IndicesGetIndexTemplate, callback: callbackFn): TransportRequestCallback - get_index_template, TContext = unknown>(params: RequestParams.IndicesGetIndexTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getIndexTemplate, TContext = unknown>(params?: RequestParams.IndicesGetIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise> - getIndexTemplate, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getIndexTemplate, TContext = unknown>(params: RequestParams.IndicesGetIndexTemplate, callback: callbackFn): TransportRequestCallback - getIndexTemplate, TContext = unknown>(params: RequestParams.IndicesGetIndexTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_mapping, TContext = unknown>(params?: RequestParams.IndicesGetMapping, options?: TransportRequestOptions): TransportRequestPromise> - get_mapping, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_mapping, TContext = unknown>(params: RequestParams.IndicesGetMapping, callback: callbackFn): TransportRequestCallback - get_mapping, TContext = unknown>(params: RequestParams.IndicesGetMapping, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getMapping, TContext = 
unknown>(params?: RequestParams.IndicesGetMapping, options?: TransportRequestOptions): TransportRequestPromise> - getMapping, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getMapping, TContext = unknown>(params: RequestParams.IndicesGetMapping, callback: callbackFn): TransportRequestCallback - getMapping, TContext = unknown>(params: RequestParams.IndicesGetMapping, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_settings, TContext = unknown>(params?: RequestParams.IndicesGetSettings, options?: TransportRequestOptions): TransportRequestPromise> - get_settings, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_settings, TContext = unknown>(params: RequestParams.IndicesGetSettings, callback: callbackFn): TransportRequestCallback - get_settings, TContext = unknown>(params: RequestParams.IndicesGetSettings, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getSettings, TContext = unknown>(params?: RequestParams.IndicesGetSettings, options?: TransportRequestOptions): TransportRequestPromise> - getSettings, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getSettings, TContext = unknown>(params: RequestParams.IndicesGetSettings, callback: callbackFn): TransportRequestCallback - getSettings, TContext = unknown>(params: RequestParams.IndicesGetSettings, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_template, TContext = unknown>(params?: RequestParams.IndicesGetTemplate, options?: TransportRequestOptions): TransportRequestPromise> - get_template, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_template, TContext = unknown>(params: RequestParams.IndicesGetTemplate, callback: callbackFn): TransportRequestCallback - get_template, TContext = unknown>(params: RequestParams.IndicesGetTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTemplate, TContext = unknown>(params?: RequestParams.IndicesGetTemplate, options?: TransportRequestOptions): TransportRequestPromise> - getTemplate, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getTemplate, TContext = unknown>(params: RequestParams.IndicesGetTemplate, callback: callbackFn): TransportRequestCallback - getTemplate, TContext = unknown>(params: RequestParams.IndicesGetTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_upgrade, TContext = unknown>(params?: RequestParams.IndicesGetUpgrade, options?: TransportRequestOptions): TransportRequestPromise> - get_upgrade, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_upgrade, TContext = unknown>(params: RequestParams.IndicesGetUpgrade, callback: callbackFn): TransportRequestCallback - get_upgrade, TContext = unknown>(params: RequestParams.IndicesGetUpgrade, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getUpgrade, TContext = unknown>(params?: RequestParams.IndicesGetUpgrade, options?: TransportRequestOptions): TransportRequestPromise> - getUpgrade, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getUpgrade, TContext = unknown>(params: RequestParams.IndicesGetUpgrade, callback: callbackFn): TransportRequestCallback - getUpgrade, TContext = unknown>(params: RequestParams.IndicesGetUpgrade, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - open, TContext = unknown>(params?: 
RequestParams.IndicesOpen, options?: TransportRequestOptions): TransportRequestPromise> - open, TContext = unknown>(callback: callbackFn): TransportRequestCallback - open, TContext = unknown>(params: RequestParams.IndicesOpen, callback: callbackFn): TransportRequestCallback - open, TContext = unknown>(params: RequestParams.IndicesOpen, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_alias, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutAlias, options?: TransportRequestOptions): TransportRequestPromise> - put_alias, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_alias, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutAlias, callback: callbackFn): TransportRequestCallback - put_alias, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putAlias, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutAlias, options?: TransportRequestOptions): TransportRequestPromise> - putAlias, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putAlias, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutAlias, callback: callbackFn): TransportRequestCallback - putAlias, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutAlias, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_index_template, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise> - put_index_template, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_index_template, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutIndexTemplate, callback: callbackFn): TransportRequestCallback - put_index_template, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutIndexTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putIndexTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutIndexTemplate, options?: TransportRequestOptions): TransportRequestPromise> - putIndexTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putIndexTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutIndexTemplate, callback: callbackFn): TransportRequestCallback - putIndexTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutIndexTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_mapping, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutMapping, options?: TransportRequestOptions): TransportRequestPromise> - put_mapping, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_mapping, TRequestBody extends RequestBody = Record, TContext = 
unknown>(params: RequestParams.IndicesPutMapping, callback: callbackFn): TransportRequestCallback - put_mapping, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutMapping, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putMapping, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutMapping, options?: TransportRequestOptions): TransportRequestPromise> - putMapping, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putMapping, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutMapping, callback: callbackFn): TransportRequestCallback - putMapping, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutMapping, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_settings, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutSettings, options?: TransportRequestOptions): TransportRequestPromise> - put_settings, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_settings, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutSettings, callback: callbackFn): TransportRequestCallback - put_settings, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutSettings, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putSettings, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutSettings, options?: TransportRequestOptions): TransportRequestPromise> - putSettings, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putSettings, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutSettings, callback: callbackFn): TransportRequestCallback - putSettings, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutSettings, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_template, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutTemplate, options?: TransportRequestOptions): TransportRequestPromise> - put_template, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_template, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutTemplate, callback: callbackFn): TransportRequestCallback - put_template, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesPutTemplate, options?: TransportRequestOptions): TransportRequestPromise> - putTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesPutTemplate, callback: callbackFn): TransportRequestCallback - putTemplate, TRequestBody extends RequestBody = Record, TContext 
= unknown>(params: RequestParams.IndicesPutTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - recovery, TContext = unknown>(params?: RequestParams.IndicesRecovery, options?: TransportRequestOptions): TransportRequestPromise> - recovery, TContext = unknown>(callback: callbackFn): TransportRequestCallback - recovery, TContext = unknown>(params: RequestParams.IndicesRecovery, callback: callbackFn): TransportRequestCallback - recovery, TContext = unknown>(params: RequestParams.IndicesRecovery, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - refresh, TContext = unknown>(params?: RequestParams.IndicesRefresh, options?: TransportRequestOptions): TransportRequestPromise> - refresh, TContext = unknown>(callback: callbackFn): TransportRequestCallback - refresh, TContext = unknown>(params: RequestParams.IndicesRefresh, callback: callbackFn): TransportRequestCallback - refresh, TContext = unknown>(params: RequestParams.IndicesRefresh, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - reload_search_analyzers, TContext = unknown>(params?: RequestParams.IndicesReloadSearchAnalyzers, options?: TransportRequestOptions): TransportRequestPromise> - reload_search_analyzers, TContext = unknown>(callback: callbackFn): TransportRequestCallback - reload_search_analyzers, TContext = unknown>(params: RequestParams.IndicesReloadSearchAnalyzers, callback: callbackFn): TransportRequestCallback - reload_search_analyzers, TContext = unknown>(params: RequestParams.IndicesReloadSearchAnalyzers, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - reloadSearchAnalyzers, TContext = unknown>(params?: RequestParams.IndicesReloadSearchAnalyzers, options?: TransportRequestOptions): TransportRequestPromise> - reloadSearchAnalyzers, TContext = unknown>(callback: callbackFn): TransportRequestCallback - reloadSearchAnalyzers, TContext = unknown>(params: RequestParams.IndicesReloadSearchAnalyzers, callback: callbackFn): TransportRequestCallback - reloadSearchAnalyzers, TContext = unknown>(params: RequestParams.IndicesReloadSearchAnalyzers, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - rollover, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.IndicesRollover, options?: TransportRequestOptions): TransportRequestPromise> - rollover, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - rollover, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesRollover, callback: callbackFn): TransportRequestCallback - rollover, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.IndicesRollover, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - segments, TContext = unknown>(params?: RequestParams.IndicesSegments, options?: TransportRequestOptions): TransportRequestPromise> - segments, TContext = unknown>(callback: callbackFn): TransportRequestCallback - segments, TContext = unknown>(params: RequestParams.IndicesSegments, callback: callbackFn): TransportRequestCallback - segments, TContext = unknown>(params: RequestParams.IndicesSegments, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - shard_stores, TContext = unknown>(params?: RequestParams.IndicesShardStores, options?: TransportRequestOptions): TransportRequestPromise> - 
-    shard_stores<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shard_stores<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesShardStores, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shard_stores<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesShardStores, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shardStores<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesShardStores, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    shardStores<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shardStores<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesShardStores, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shardStores<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesShardStores, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shrink<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesShrink<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    shrink<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shrink<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesShrink<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    shrink<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesShrink<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulate_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_index_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulateIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateIndexTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSimulateIndexTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesSimulateTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulate_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSimulateTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate_template<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSimulateTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesSimulateTemplate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulateTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSimulateTemplate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulateTemplate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSimulateTemplate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    split<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesSplit<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    split<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    split<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSplit<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    split<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesSplit<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    unfreeze<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesUnfreeze, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    unfreeze<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    unfreeze<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesUnfreeze, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    unfreeze<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesUnfreeze, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    update_aliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesUpdateAliases<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    update_aliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    update_aliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesUpdateAliases<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    update_aliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesUpdateAliases<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    updateAliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesUpdateAliases<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    updateAliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    updateAliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesUpdateAliases<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    updateAliases<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesUpdateAliases<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    upgrade<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesUpgrade, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    upgrade<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    upgrade<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesUpgrade, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    upgrade<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesUpgrade, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validate_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesValidateQuery<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    validate_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validate_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesValidateQuery<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validate_query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesValidateQuery<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validateQuery<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IndicesValidateQuery<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    validateQuery<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validateQuery<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesValidateQuery<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    validateQuery<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IndicesValidateQuery<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  info<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.Info, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-  info<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  info<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.Info, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  info<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.Info, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  ingest: {
-    delete_pipeline<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IngestDeletePipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_pipeline<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_pipeline<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestDeletePipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_pipeline<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestDeletePipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePipeline<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IngestDeletePipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deletePipeline<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePipeline<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestDeletePipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePipeline<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestDeletePipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_pipeline<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IngestGetPipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_pipeline<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_pipeline<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestGetPipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_pipeline<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestGetPipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPipeline<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IngestGetPipeline, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getPipeline<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPipeline<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestGetPipeline, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPipeline<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestGetPipeline, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processor_grok<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IngestProcessorGrok, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    processor_grok<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processor_grok<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestProcessorGrok, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processor_grok<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestProcessorGrok, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processorGrok<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.IngestProcessorGrok, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    processorGrok<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processorGrok<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestProcessorGrok, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    processorGrok<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.IngestProcessorGrok, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IngestPutPipeline<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IngestPutPipeline<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_pipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IngestPutPipeline<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IngestPutPipeline<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IngestPutPipeline<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPipeline<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IngestPutPipeline<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.IngestSimulate<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    simulate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IngestSimulate<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    simulate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.IngestSimulate<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  license: {
-    delete<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicenseDelete, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseDelete, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseDelete, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicenseGet, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGet, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGet, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_basic_status<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicenseGetBasicStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_basic_status<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_basic_status<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGetBasicStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_basic_status<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGetBasicStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBasicStatus<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicenseGetBasicStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getBasicStatus<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBasicStatus<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGetBasicStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBasicStatus<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGetBasicStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_trial_status<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicenseGetTrialStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_trial_status<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_trial_status<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGetTrialStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_trial_status<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGetTrialStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTrialStatus<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicenseGetTrialStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getTrialStatus<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTrialStatus<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGetTrialStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTrialStatus<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicenseGetTrialStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.LicensePost<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    post<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePost<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePost<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_basic<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicensePostStartBasic, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    post_start_basic<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_basic<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePostStartBasic, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_basic<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePostStartBasic, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartBasic<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicensePostStartBasic, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    postStartBasic<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartBasic<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePostStartBasic, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartBasic<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePostStartBasic, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_trial<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicensePostStartTrial, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    post_start_trial<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_trial<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePostStartTrial, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    post_start_trial<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePostStartTrial, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartTrial<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.LicensePostStartTrial, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    postStartTrial<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartTrial<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePostStartTrial, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    postStartTrial<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.LicensePostStartTrial, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  mget<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.Mget<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-  mget<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  mget<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.Mget<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  mget<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.Mget<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  migration: {
-    deprecations<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MigrationDeprecations, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deprecations<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deprecations<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MigrationDeprecations, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deprecations<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MigrationDeprecations, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  ml: {
-    close_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlCloseJob<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    close_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    close_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlCloseJob<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    close_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlCloseJob<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    closeJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlCloseJob<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    closeJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    closeJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlCloseJob<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    closeJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlCloseJob<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteCalendar, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_calendar<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendar, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendar, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendar<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteCalendar, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteCalendar<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendar<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendar, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendar<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendar, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_event<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteCalendarEvent, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_calendar_event<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_event<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendarEvent, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_event<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendarEvent, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarEvent<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteCalendarEvent, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteCalendarEvent<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarEvent<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendarEvent, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarEvent<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendarEvent, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_job<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteCalendarJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_calendar_job<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_job<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendarJob, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_calendar_job<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendarJob, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarJob<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteCalendarJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteCalendarJob<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarJob<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendarJob, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteCalendarJob<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteCalendarJob, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_data_frame_analytics<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_data_frame_analytics<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_data_frame_analytics<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteDataFrameAnalytics, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_data_frame_analytics<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDataFrameAnalytics<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteDataFrameAnalytics<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDataFrameAnalytics<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteDataFrameAnalytics, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDataFrameAnalytics<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_datafeed<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteDatafeed, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_datafeed<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_datafeed<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteDatafeed, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_datafeed<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteDatafeed, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDatafeed<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteDatafeed, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteDatafeed<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDatafeed<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteDatafeed, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteDatafeed<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteDatafeed, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_expired_data<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteExpiredData<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_expired_data<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_expired_data<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteExpiredData<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_expired_data<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteExpiredData<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteExpiredData<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteExpiredData<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteExpiredData<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteExpiredData<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteExpiredData<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteExpiredData<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteExpiredData<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_filter<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteFilter, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_filter<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_filter<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteFilter, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_filter<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteFilter, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteFilter<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteFilter, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteFilter<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteFilter<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteFilter, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteFilter<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteFilter, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_forecast<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteForecast, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_forecast<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_forecast<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteForecast, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_forecast<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteForecast, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteForecast<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteForecast, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteForecast<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteForecast<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteForecast, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteForecast<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteForecast, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_job<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_job<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_job<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteJob, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_job<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteJob, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteJob<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteJob, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteJob<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteJob<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteJob, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteJob<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteJob, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_model_snapshot<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_model_snapshot<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_model_snapshot<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteModelSnapshot, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_model_snapshot<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteModelSnapshot, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteModelSnapshot<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteModelSnapshot<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteModelSnapshot<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteModelSnapshot, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteModelSnapshot<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteModelSnapshot, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_trained_model<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteTrainedModel, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_trained_model<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_trained_model<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteTrainedModel, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_trained_model<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteTrainedModel, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteTrainedModel<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlDeleteTrainedModel, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteTrainedModel<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteTrainedModel<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteTrainedModel, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteTrainedModel<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlDeleteTrainedModel, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    estimate_model_memory<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlEstimateModelMemory<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    estimate_model_memory<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    estimate_model_memory<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlEstimateModelMemory<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    estimate_model_memory<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlEstimateModelMemory<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    estimateModelMemory<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlEstimateModelMemory<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    estimateModelMemory<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    estimateModelMemory<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlEstimateModelMemory<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    estimateModelMemory<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlEstimateModelMemory<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    evaluate_data_frame<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlEvaluateDataFrame<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    evaluate_data_frame<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    evaluate_data_frame<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlEvaluateDataFrame<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    evaluate_data_frame<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlEvaluateDataFrame<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    evaluateDataFrame<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlEvaluateDataFrame<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    evaluateDataFrame<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    evaluateDataFrame<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlEvaluateDataFrame<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    evaluateDataFrame<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlEvaluateDataFrame<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    explain_data_frame_analytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlExplainDataFrameAnalytics<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    explain_data_frame_analytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    explain_data_frame_analytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlExplainDataFrameAnalytics<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    explain_data_frame_analytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlExplainDataFrameAnalytics<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    explainDataFrameAnalytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlExplainDataFrameAnalytics<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    explainDataFrameAnalytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    explainDataFrameAnalytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlExplainDataFrameAnalytics<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    explainDataFrameAnalytics<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlExplainDataFrameAnalytics<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    find_file_structure<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(params?: RequestParams.MlFindFileStructure<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    find_file_structure<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    find_file_structure<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(params: RequestParams.MlFindFileStructure<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    find_file_structure<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(params: RequestParams.MlFindFileStructure<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    findFileStructure<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(params?: RequestParams.MlFindFileStructure<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    findFileStructure<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    findFileStructure<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(params: RequestParams.MlFindFileStructure<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    findFileStructure<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(params: RequestParams.MlFindFileStructure<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    flush_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlFlushJob<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    flush_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    flush_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlFlushJob<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    flush_job<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlFlushJob<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    flushJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlFlushJob<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    flushJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    flushJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlFlushJob<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    flushJob<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlFlushJob<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    forecast<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlForecast, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    forecast<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    forecast<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlForecast, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    forecast<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlForecast, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_buckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetBuckets<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_buckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_buckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetBuckets<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_buckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetBuckets<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBuckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetBuckets<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getBuckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBuckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetBuckets<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBuckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetBuckets<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_calendar_events<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetCalendarEvents, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_calendar_events<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_calendar_events<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCalendarEvents, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_calendar_events<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCalendarEvents, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getCalendarEvents<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetCalendarEvents, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getCalendarEvents<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getCalendarEvents<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCalendarEvents, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getCalendarEvents<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCalendarEvents, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_calendars<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetCalendars<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_calendars<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_calendars<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCalendars<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_calendars<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCalendars<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getCalendars<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetCalendars<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getCalendars<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getCalendars<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCalendars<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getCalendars<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCalendars<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_categories<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetCategories<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_categories<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_categories<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCategories<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_categories<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCategories<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getCategories<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetCategories<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getCategories<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getCategories<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCategories<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getCategories<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetCategories<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_data_frame_analytics<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_data_frame_analytics<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_data_frame_analytics<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDataFrameAnalytics, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_data_frame_analytics<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDataFrameAnalytics<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getDataFrameAnalytics<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDataFrameAnalytics<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDataFrameAnalytics, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDataFrameAnalytics<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_data_frame_analytics_stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetDataFrameAnalyticsStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_data_frame_analytics_stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_data_frame_analytics_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDataFrameAnalyticsStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_data_frame_analytics_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDataFrameAnalyticsStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDataFrameAnalyticsStats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetDataFrameAnalyticsStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getDataFrameAnalyticsStats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDataFrameAnalyticsStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDataFrameAnalyticsStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDataFrameAnalyticsStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDataFrameAnalyticsStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_datafeed_stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetDatafeedStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_datafeed_stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_datafeed_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDatafeedStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_datafeed_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDatafeedStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDatafeedStats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetDatafeedStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getDatafeedStats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDatafeedStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDatafeedStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDatafeedStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDatafeedStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_datafeeds<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetDatafeeds, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_datafeeds<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_datafeeds<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDatafeeds, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_datafeeds<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDatafeeds, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDatafeeds<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetDatafeeds, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getDatafeeds<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDatafeeds<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDatafeeds, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getDatafeeds<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetDatafeeds, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_filters<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetFilters, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_filters<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_filters<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetFilters, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_filters<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetFilters, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getFilters<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetFilters, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getFilters<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getFilters<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetFilters, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getFilters<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetFilters, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_influencers<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetInfluencers<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_influencers<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_influencers<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetInfluencers<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_influencers<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetInfluencers<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getInfluencers<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetInfluencers<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getInfluencers<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getInfluencers<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetInfluencers<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getInfluencers<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetInfluencers<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_job_stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetJobStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_job_stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_job_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetJobStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_job_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetJobStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getJobStats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetJobStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getJobStats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getJobStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetJobStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getJobStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetJobStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_jobs<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetJobs, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_jobs<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_jobs<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetJobs, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_jobs<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetJobs, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getJobs<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetJobs, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getJobs<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getJobs<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetJobs, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getJobs<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetJobs, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_model_snapshots<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetModelSnapshots<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_model_snapshots<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_model_snapshots<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetModelSnapshots<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_model_snapshots<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetModelSnapshots<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getModelSnapshots<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetModelSnapshots<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getModelSnapshots<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getModelSnapshots<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetModelSnapshots<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getModelSnapshots<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetModelSnapshots<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_overall_buckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetOverallBuckets<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_overall_buckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_overall_buckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetOverallBuckets<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_overall_buckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetOverallBuckets<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getOverallBuckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetOverallBuckets<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getOverallBuckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getOverallBuckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetOverallBuckets<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getOverallBuckets<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetOverallBuckets<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_records<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetRecords<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_records<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_records<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetRecords<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_records<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetRecords<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRecords<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.MlGetRecords<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getRecords<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRecords<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.MlGetRecords<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
TContext = unknown>(params: RequestParams.MlGetRecords, callback: callbackFn): TransportRequestCallback - getRecords, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlGetRecords, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_trained_models, TContext = unknown>(params?: RequestParams.MlGetTrainedModels, options?: TransportRequestOptions): TransportRequestPromise> - get_trained_models, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_trained_models, TContext = unknown>(params: RequestParams.MlGetTrainedModels, callback: callbackFn): TransportRequestCallback - get_trained_models, TContext = unknown>(params: RequestParams.MlGetTrainedModels, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrainedModels, TContext = unknown>(params?: RequestParams.MlGetTrainedModels, options?: TransportRequestOptions): TransportRequestPromise> - getTrainedModels, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getTrainedModels, TContext = unknown>(params: RequestParams.MlGetTrainedModels, callback: callbackFn): TransportRequestCallback - getTrainedModels, TContext = unknown>(params: RequestParams.MlGetTrainedModels, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_trained_models_stats, TContext = unknown>(params?: RequestParams.MlGetTrainedModelsStats, options?: TransportRequestOptions): TransportRequestPromise> - get_trained_models_stats, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_trained_models_stats, TContext = unknown>(params: RequestParams.MlGetTrainedModelsStats, callback: callbackFn): TransportRequestCallback - get_trained_models_stats, TContext = unknown>(params: RequestParams.MlGetTrainedModelsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTrainedModelsStats, TContext = unknown>(params?: RequestParams.MlGetTrainedModelsStats, options?: TransportRequestOptions): TransportRequestPromise> - getTrainedModelsStats, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getTrainedModelsStats, TContext = unknown>(params: RequestParams.MlGetTrainedModelsStats, callback: callbackFn): TransportRequestCallback - getTrainedModelsStats, TContext = unknown>(params: RequestParams.MlGetTrainedModelsStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - info, TContext = unknown>(params?: RequestParams.MlInfo, options?: TransportRequestOptions): TransportRequestPromise> - info, TContext = unknown>(callback: callbackFn): TransportRequestCallback - info, TContext = unknown>(params: RequestParams.MlInfo, callback: callbackFn): TransportRequestCallback - info, TContext = unknown>(params: RequestParams.MlInfo, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - open_job, TContext = unknown>(params?: RequestParams.MlOpenJob, options?: TransportRequestOptions): TransportRequestPromise> - open_job, TContext = unknown>(callback: callbackFn): TransportRequestCallback - open_job, TContext = unknown>(params: RequestParams.MlOpenJob, callback: callbackFn): TransportRequestCallback - open_job, TContext = unknown>(params: RequestParams.MlOpenJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - openJob, TContext = unknown>(params?: RequestParams.MlOpenJob, options?: TransportRequestOptions): TransportRequestPromise> - openJob, TContext = 
unknown>(callback: callbackFn): TransportRequestCallback - openJob, TContext = unknown>(params: RequestParams.MlOpenJob, callback: callbackFn): TransportRequestCallback - openJob, TContext = unknown>(params: RequestParams.MlOpenJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - post_calendar_events, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPostCalendarEvents, options?: TransportRequestOptions): TransportRequestPromise> - post_calendar_events, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - post_calendar_events, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPostCalendarEvents, callback: callbackFn): TransportRequestCallback - post_calendar_events, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPostCalendarEvents, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postCalendarEvents, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPostCalendarEvents, options?: TransportRequestOptions): TransportRequestPromise> - postCalendarEvents, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - postCalendarEvents, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPostCalendarEvents, callback: callbackFn): TransportRequestCallback - postCalendarEvents, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPostCalendarEvents, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - post_data, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPostData, options?: TransportRequestOptions): TransportRequestPromise> - post_data, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - post_data, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPostData, callback: callbackFn): TransportRequestCallback - post_data, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPostData, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - postData, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPostData, options?: TransportRequestOptions): TransportRequestPromise> - postData, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - postData, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPostData, callback: callbackFn): TransportRequestCallback - postData, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPostData, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - preview_datafeed, TContext = unknown>(params?: RequestParams.MlPreviewDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - preview_datafeed, TContext = unknown>(callback: callbackFn): TransportRequestCallback - preview_datafeed, TContext = unknown>(params: RequestParams.MlPreviewDatafeed, callback: callbackFn): TransportRequestCallback - preview_datafeed, TContext = unknown>(params: RequestParams.MlPreviewDatafeed, options: TransportRequestOptions, callback: callbackFn): 
TransportRequestCallback - previewDatafeed, TContext = unknown>(params?: RequestParams.MlPreviewDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - previewDatafeed, TContext = unknown>(callback: callbackFn): TransportRequestCallback - previewDatafeed, TContext = unknown>(params: RequestParams.MlPreviewDatafeed, callback: callbackFn): TransportRequestCallback - previewDatafeed, TContext = unknown>(params: RequestParams.MlPreviewDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_calendar, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutCalendar, options?: TransportRequestOptions): TransportRequestPromise> - put_calendar, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_calendar, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutCalendar, callback: callbackFn): TransportRequestCallback - put_calendar, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutCalendar, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putCalendar, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutCalendar, options?: TransportRequestOptions): TransportRequestPromise> - putCalendar, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putCalendar, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutCalendar, callback: callbackFn): TransportRequestCallback - putCalendar, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutCalendar, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_calendar_job, TContext = unknown>(params?: RequestParams.MlPutCalendarJob, options?: TransportRequestOptions): TransportRequestPromise> - put_calendar_job, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_calendar_job, TContext = unknown>(params: RequestParams.MlPutCalendarJob, callback: callbackFn): TransportRequestCallback - put_calendar_job, TContext = unknown>(params: RequestParams.MlPutCalendarJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putCalendarJob, TContext = unknown>(params?: RequestParams.MlPutCalendarJob, options?: TransportRequestOptions): TransportRequestPromise> - putCalendarJob, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putCalendarJob, TContext = unknown>(params: RequestParams.MlPutCalendarJob, callback: callbackFn): TransportRequestCallback - putCalendarJob, TContext = unknown>(params: RequestParams.MlPutCalendarJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - put_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - put_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: 
RequestParams.MlPutDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - putDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - putDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - put_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutDatafeed, callback: callbackFn): TransportRequestCallback - put_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - putDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutDatafeed, callback: callbackFn): TransportRequestCallback - putDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_filter, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutFilter, options?: TransportRequestOptions): TransportRequestPromise> - put_filter, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_filter, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutFilter, callback: callbackFn): TransportRequestCallback - put_filter, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutFilter, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putFilter, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutFilter, options?: TransportRequestOptions): TransportRequestPromise> - putFilter, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putFilter, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutFilter, callback: callbackFn): TransportRequestCallback - putFilter, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutFilter, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_job, TRequestBody extends RequestBody = Record, TContext = 
unknown>(params?: RequestParams.MlPutJob, options?: TransportRequestOptions): TransportRequestPromise> - put_job, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_job, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutJob, callback: callbackFn): TransportRequestCallback - put_job, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putJob, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutJob, options?: TransportRequestOptions): TransportRequestPromise> - putJob, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putJob, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutJob, callback: callbackFn): TransportRequestCallback - putJob, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_trained_model, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutTrainedModel, options?: TransportRequestOptions): TransportRequestPromise> - put_trained_model, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_trained_model, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutTrainedModel, callback: callbackFn): TransportRequestCallback - put_trained_model, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutTrainedModel, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putTrainedModel, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlPutTrainedModel, options?: TransportRequestOptions): TransportRequestPromise> - putTrainedModel, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putTrainedModel, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutTrainedModel, callback: callbackFn): TransportRequestCallback - putTrainedModel, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlPutTrainedModel, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - revert_model_snapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlRevertModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise> - revert_model_snapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - revert_model_snapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlRevertModelSnapshot, callback: callbackFn): TransportRequestCallback - revert_model_snapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlRevertModelSnapshot, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - revertModelSnapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlRevertModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise> - revertModelSnapshot, TRequestBody extends 
RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - revertModelSnapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlRevertModelSnapshot, callback: callbackFn): TransportRequestCallback - revertModelSnapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlRevertModelSnapshot, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - set_upgrade_mode, TContext = unknown>(params?: RequestParams.MlSetUpgradeMode, options?: TransportRequestOptions): TransportRequestPromise> - set_upgrade_mode, TContext = unknown>(callback: callbackFn): TransportRequestCallback - set_upgrade_mode, TContext = unknown>(params: RequestParams.MlSetUpgradeMode, callback: callbackFn): TransportRequestCallback - set_upgrade_mode, TContext = unknown>(params: RequestParams.MlSetUpgradeMode, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - setUpgradeMode, TContext = unknown>(params?: RequestParams.MlSetUpgradeMode, options?: TransportRequestOptions): TransportRequestPromise> - setUpgradeMode, TContext = unknown>(callback: callbackFn): TransportRequestCallback - setUpgradeMode, TContext = unknown>(params: RequestParams.MlSetUpgradeMode, callback: callbackFn): TransportRequestCallback - setUpgradeMode, TContext = unknown>(params: RequestParams.MlSetUpgradeMode, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - start_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlStartDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - start_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - start_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStartDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - start_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStartDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - startDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlStartDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - startDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - startDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStartDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - startDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStartDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - start_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlStartDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - start_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - start_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStartDatafeed, callback: callbackFn): TransportRequestCallback - start_datafeed, TRequestBody extends RequestBody = 
Record, TContext = unknown>(params: RequestParams.MlStartDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - startDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlStartDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - startDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - startDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStartDatafeed, callback: callbackFn): TransportRequestCallback - startDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStartDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stop_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlStopDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - stop_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stop_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStopDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - stop_data_frame_analytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStopDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stopDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlStopDataFrameAnalytics, options?: TransportRequestOptions): TransportRequestPromise> - stopDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stopDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStopDataFrameAnalytics, callback: callbackFn): TransportRequestCallback - stopDataFrameAnalytics, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlStopDataFrameAnalytics, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stop_datafeed, TContext = unknown>(params?: RequestParams.MlStopDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - stop_datafeed, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stop_datafeed, TContext = unknown>(params: RequestParams.MlStopDatafeed, callback: callbackFn): TransportRequestCallback - stop_datafeed, TContext = unknown>(params: RequestParams.MlStopDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stopDatafeed, TContext = unknown>(params?: RequestParams.MlStopDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - stopDatafeed, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stopDatafeed, TContext = unknown>(params: RequestParams.MlStopDatafeed, callback: callbackFn): TransportRequestCallback - stopDatafeed, TContext = unknown>(params: RequestParams.MlStopDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlUpdateDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - update_datafeed, TRequestBody extends RequestBody 
= Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - update_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateDatafeed, callback: callbackFn): TransportRequestCallback - update_datafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlUpdateDatafeed, options?: TransportRequestOptions): TransportRequestPromise> - updateDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - updateDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateDatafeed, callback: callbackFn): TransportRequestCallback - updateDatafeed, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateDatafeed, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_filter, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlUpdateFilter, options?: TransportRequestOptions): TransportRequestPromise> - update_filter, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - update_filter, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateFilter, callback: callbackFn): TransportRequestCallback - update_filter, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateFilter, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateFilter, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlUpdateFilter, options?: TransportRequestOptions): TransportRequestPromise> - updateFilter, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - updateFilter, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateFilter, callback: callbackFn): TransportRequestCallback - updateFilter, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateFilter, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_job, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlUpdateJob, options?: TransportRequestOptions): TransportRequestPromise> - update_job, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - update_job, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateJob, callback: callbackFn): TransportRequestCallback - update_job, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateJob, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlUpdateJob, options?: TransportRequestOptions): TransportRequestPromise> - updateJob, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - updateJob, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateJob, callback: 
callbackFn): TransportRequestCallback - updateJob, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateJob, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_model_snapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlUpdateModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise> - update_model_snapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - update_model_snapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateModelSnapshot, callback: callbackFn): TransportRequestCallback - update_model_snapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateModelSnapshot, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateModelSnapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlUpdateModelSnapshot, options?: TransportRequestOptions): TransportRequestPromise> - updateModelSnapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - updateModelSnapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateModelSnapshot, callback: callbackFn): TransportRequestCallback - updateModelSnapshot, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlUpdateModelSnapshot, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - validate, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlValidate, options?: TransportRequestOptions): TransportRequestPromise> - validate, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - validate, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlValidate, callback: callbackFn): TransportRequestCallback - validate, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlValidate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - validate_detector, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlValidateDetector, options?: TransportRequestOptions): TransportRequestPromise> - validate_detector, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - validate_detector, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlValidateDetector, callback: callbackFn): TransportRequestCallback - validate_detector, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlValidateDetector, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - validateDetector, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.MlValidateDetector, options?: TransportRequestOptions): TransportRequestPromise> - validateDetector, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - validateDetector, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.MlValidateDetector, callback: callbackFn): TransportRequestCallback - validateDetector, TRequestBody extends 
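// The ml declarations above all share one shape: four overloads per method (promise form; bare
// callback; params + callback; params + options + callback), duplicated under snake_case and
// camelCase names. A minimal usage sketch of that legacy surface, assuming a 7.x
// @elastic/elasticsearch client; the node URL and job id below are placeholders, not values
// taken from this diff:
import { Client, ApiResponse, RequestParams } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })
const params: RequestParams.MlGetInfluencers = { job_id: 'my-anomaly-job' }

// Overload 1: omit the callback and a cancelable TransportRequestPromise comes back
client.ml.get_influencers(params)
  .then((res: ApiResponse) => console.log(res.body))
  .catch(err => console.error(err))

// Overloads 2-4: pass a callback and a TransportRequestCallback handle comes back instead;
// the camelCase alias is interchangeable with the snake_case name
client.ml.getInfluencers(params, (err, res) => {
  if (err) console.error(err)
  else console.log(res.body)
})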
-  monitoring: {
-    bulk<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(params?: RequestParams.MonitoringBulk<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    bulk<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    bulk<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(params: RequestParams.MonitoringBulk<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    bulk<TResponse = Record<string, any>, TRequestBody extends RequestNDBody = Record<string, any>[], TContext = unknown>(params: RequestParams.MonitoringBulk<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
    [… the same four-overload pattern repeats for the remaining removals (see the ndjson sketch below for the RequestNDBody variants): top-level msearch and msearch_template/msearchTemplate (both RequestNDBody) and mtermvectors; the nodes namespace with hot_threads/hotThreads, info, reload_secure_settings/reloadSecureSettings, stats and usage; top-level ping, put_script/putScript, rank_eval/rankEval, reindex, reindex_rethrottle/reindexRethrottle and render_search_template/renderSearchTemplate; the rollup namespace with delete_job/deleteJob, get_jobs/getJobs, get_rollup_caps/getRollupCaps, get_rollup_index_caps/getRollupIndexCaps, put_job/putJob, rollup_search/rollupSearch, start_job/startJob and stop_job/stopJob; and top-level scripts_painless_execute/scriptsPainlessExecute and scroll. …]
-  search<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.Search<TRequestBody>, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-  search<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  search<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.Search<TRequestBody>, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  search<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.Search<TRequestBody>, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
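// msearch and the monitoring bulk declarations above take RequestNDBody (an array the client
// serializes as newline-delimited JSON) rather than a single RequestBody object. A sketch
// reusing the client from the previous example; the index name and query are placeholders:
const msearchParams: RequestParams.Msearch = {
  body: [
    { index: 'my-index' },         // header line of the first search
    { query: { match_all: {} } },  // body line of the first search
  ],
}
client.msearch(msearchParams)
  .then(res => console.log(res.body.responses))
  .catch(err => console.error(err))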
unknown>(params: RequestParams.Search, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - search_shards, TContext = unknown>(params?: RequestParams.SearchShards, options?: TransportRequestOptions): TransportRequestPromise> - search_shards, TContext = unknown>(callback: callbackFn): TransportRequestCallback - search_shards, TContext = unknown>(params: RequestParams.SearchShards, callback: callbackFn): TransportRequestCallback - search_shards, TContext = unknown>(params: RequestParams.SearchShards, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - searchShards, TContext = unknown>(params?: RequestParams.SearchShards, options?: TransportRequestOptions): TransportRequestPromise> - searchShards, TContext = unknown>(callback: callbackFn): TransportRequestCallback - searchShards, TContext = unknown>(params: RequestParams.SearchShards, callback: callbackFn): TransportRequestCallback - searchShards, TContext = unknown>(params: RequestParams.SearchShards, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - search_template, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.SearchTemplate, options?: TransportRequestOptions): TransportRequestPromise> - search_template, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - search_template, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.SearchTemplate, callback: callbackFn): TransportRequestCallback - search_template, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.SearchTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - searchTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.SearchTemplate, options?: TransportRequestOptions): TransportRequestPromise> - searchTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - searchTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.SearchTemplate, callback: callbackFn): TransportRequestCallback - searchTemplate, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.SearchTemplate, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - searchable_snapshots: { - clear_cache, TContext = unknown>(params?: RequestParams.SearchableSnapshotsClearCache, options?: TransportRequestOptions): TransportRequestPromise> - clear_cache, TContext = unknown>(callback: callbackFn): TransportRequestCallback - clear_cache, TContext = unknown>(params: RequestParams.SearchableSnapshotsClearCache, callback: callbackFn): TransportRequestCallback - clear_cache, TContext = unknown>(params: RequestParams.SearchableSnapshotsClearCache, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - clearCache, TContext = unknown>(params?: RequestParams.SearchableSnapshotsClearCache, options?: TransportRequestOptions): TransportRequestPromise> - clearCache, TContext = unknown>(callback: callbackFn): TransportRequestCallback - clearCache, TContext = unknown>(params: RequestParams.SearchableSnapshotsClearCache, callback: callbackFn): TransportRequestCallback - clearCache, TContext = unknown>(params: RequestParams.SearchableSnapshotsClearCache, options: TransportRequestOptions, callback: callbackFn): 
-    mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsMount, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsMount, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsMount, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repository_stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsRepositoryStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    repository_stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repository_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsRepositoryStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repository_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsRepositoryStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repositoryStats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsRepositoryStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    repositoryStats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repositoryStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsRepositoryStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repositoryStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsRepositoryStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  searchableSnapshots: {
-    clear_cache<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsClearCache, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    clear_cache<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clear_cache<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsClearCache, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clear_cache<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsClearCache, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCache<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsClearCache, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    clearCache<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCache<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsClearCache, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCache<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsClearCache, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsMount, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsMount, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    mount<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsMount, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repository_stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsRepositoryStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    repository_stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repository_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsRepositoryStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repository_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsRepositoryStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repositoryStats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsRepositoryStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    repositoryStats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repositoryStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsRepositoryStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    repositoryStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsRepositoryStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SearchableSnapshotsStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SearchableSnapshotsStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  security: {
-    authenticate<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityAuthenticate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    authenticate<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    authenticate<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityAuthenticate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    authenticate<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityAuthenticate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    change_password<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityChangePassword, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    change_password<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    change_password<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityChangePassword, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    change_password<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityChangePassword, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    changePassword<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityChangePassword, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
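Among the `searchable_snapshots` typings deleted above is the mount API. A sketch of a mount call as these typings allowed it, inside an async function; the repository, snapshot, and index names are purely illustrative:

```ts
// Illustrative only: 'my-repo', 'nightly-snap', and 'my-index' are hypothetical names.
const response = await client.searchableSnapshots.mount({
  repository: 'my-repo',
  snapshot: 'nightly-snap',
  body: { index: 'my-index' }
})
console.log(response.body)
```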
-    changePassword<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    changePassword<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityChangePassword, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    changePassword<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityChangePassword, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clear_cached_realms<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityClearCachedRealms, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    clear_cached_realms<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clear_cached_realms<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityClearCachedRealms, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clear_cached_realms<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityClearCachedRealms, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCachedRealms<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityClearCachedRealms, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    clearCachedRealms<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCachedRealms<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityClearCachedRealms, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCachedRealms<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityClearCachedRealms, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clear_cached_roles<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityClearCachedRoles, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    clear_cached_roles<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clear_cached_roles<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityClearCachedRoles, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clear_cached_roles<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityClearCachedRoles, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCachedRoles<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityClearCachedRoles, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    clearCachedRoles<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCachedRoles<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityClearCachedRoles, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCachedRoles<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityClearCachedRoles, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    create_api_key<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityCreateApiKey, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    create_api_key<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    create_api_key<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityCreateApiKey, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    create_api_key<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityCreateApiKey, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    createApiKey<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityCreateApiKey, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    createApiKey<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    createApiKey<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityCreateApiKey, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    createApiKey<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityCreateApiKey, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_privileges<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDeletePrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_privileges<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_privileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeletePrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_privileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeletePrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePrivileges<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDeletePrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deletePrivileges<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePrivileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeletePrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deletePrivileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeletePrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_role<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDeleteRole, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_role<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_role<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteRole, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_role<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteRole, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteRole<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDeleteRole, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteRole<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteRole<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteRole, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteRole<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteRole, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_role_mapping<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDeleteRoleMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_role_mapping<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_role_mapping<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteRoleMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_role_mapping<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteRoleMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteRoleMapping<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDeleteRoleMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteRoleMapping<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteRoleMapping<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteRoleMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteRoleMapping<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteRoleMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_user<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDeleteUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_user<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_user<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_user<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteUser<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDeleteUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteUser<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteUser<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteUser<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDeleteUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    disable_user<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDisableUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    disable_user<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    disable_user<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDisableUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    disable_user<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDisableUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    disableUser<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityDisableUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    disableUser<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    disableUser<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDisableUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    disableUser<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityDisableUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    enable_user<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityEnableUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    enable_user<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    enable_user<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityEnableUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    enable_user<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityEnableUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    enableUser<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityEnableUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    enableUser<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    enableUser<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityEnableUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    enableUser<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityEnableUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_api_key<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetApiKey, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_api_key<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_api_key<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetApiKey, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_api_key<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetApiKey, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getApiKey<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetApiKey, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getApiKey<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getApiKey<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetApiKey, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getApiKey<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetApiKey, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_builtin_privileges<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetBuiltinPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_builtin_privileges<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_builtin_privileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetBuiltinPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_builtin_privileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetBuiltinPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBuiltinPrivileges<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetBuiltinPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getBuiltinPrivileges<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBuiltinPrivileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetBuiltinPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getBuiltinPrivileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetBuiltinPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_privileges<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_privileges<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_privileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_privileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPrivileges<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getPrivileges<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPrivileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getPrivileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_role<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetRole, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_role<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_role<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetRole, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_role<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetRole, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRole<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetRole, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getRole<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRole<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetRole, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRole<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetRole, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_role_mapping<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetRoleMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_role_mapping<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_role_mapping<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetRoleMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_role_mapping<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetRoleMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRoleMapping<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetRoleMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getRoleMapping<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRoleMapping<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetRoleMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRoleMapping<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetRoleMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_token<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetToken, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_token<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_token<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetToken, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_token<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetToken, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getToken<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetToken, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getToken<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getToken<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetToken, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getToken<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetToken, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_user<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_user<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_user<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_user<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getUser<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getUser<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getUser<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getUser<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_user_privileges<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetUserPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_user_privileges<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_user_privileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetUserPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_user_privileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetUserPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getUserPrivileges<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityGetUserPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getUserPrivileges<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getUserPrivileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetUserPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getUserPrivileges<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityGetUserPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    has_privileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityHasPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    has_privileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    has_privileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityHasPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    has_privileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityHasPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    hasPrivileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityHasPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    hasPrivileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    hasPrivileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityHasPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    hasPrivileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityHasPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidate_api_key<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityInvalidateApiKey, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    invalidate_api_key<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidate_api_key<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityInvalidateApiKey, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidate_api_key<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityInvalidateApiKey, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidateApiKey<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityInvalidateApiKey, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    invalidateApiKey<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidateApiKey<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityInvalidateApiKey, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidateApiKey<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityInvalidateApiKey, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidate_token<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityInvalidateToken, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    invalidate_token<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidate_token<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityInvalidateToken, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidate_token<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityInvalidateToken, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidateToken<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityInvalidateToken, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    invalidateToken<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidateToken<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityInvalidateToken, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    invalidateToken<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityInvalidateToken, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_privileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityPutPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_privileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_privileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_privileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPrivileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityPutPrivileges, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putPrivileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPrivileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutPrivileges, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putPrivileges<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutPrivileges, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_role<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityPutRole, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_role<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_role<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutRole, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_role<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutRole, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putRole<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityPutRole, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putRole<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putRole<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutRole, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putRole<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutRole, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_role_mapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityPutRoleMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_role_mapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_role_mapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutRoleMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_role_mapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutRoleMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putRoleMapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityPutRoleMapping, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putRoleMapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putRoleMapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutRoleMapping, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putRoleMapping<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutRoleMapping, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_user<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityPutUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_user<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_user<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_user<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putUser<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SecurityPutUser, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putUser<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putUser<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutUser, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putUser<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SecurityPutUser, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  slm: {
-    delete_lifecycle<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmDeleteLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_lifecycle<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_lifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmDeleteLifecycle, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_lifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmDeleteLifecycle, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteLifecycle<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmDeleteLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteLifecycle<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteLifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmDeleteLifecycle, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteLifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmDeleteLifecycle, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    execute_lifecycle<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmExecuteLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    execute_lifecycle<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    execute_lifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmExecuteLifecycle, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    execute_lifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmExecuteLifecycle, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    executeLifecycle<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmExecuteLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    executeLifecycle<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    executeLifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmExecuteLifecycle, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    executeLifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmExecuteLifecycle, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    execute_retention<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmExecuteRetention, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    execute_retention<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    execute_retention<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmExecuteRetention, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    execute_retention<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmExecuteRetention, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    executeRetention<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmExecuteRetention, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    executeRetention<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    executeRetention<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmExecuteRetention, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    executeRetention<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmExecuteRetention, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
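The `security` namespace removed above included the API-key management typings. A minimal sketch of creating an API key through them, inside an async function; the key name and role descriptor are hypothetical:

```ts
// Hypothetical key name and role; `role_descriptors` limits what the key can do.
const { body: key } = await client.security.createApiKey({
  body: {
    name: 'ingest-key',
    expiration: '7d',
    role_descriptors: {
      'ingest-only': {
        index: [{ names: ['logs-*'], privileges: ['create_doc'] }]
      }
    }
  }
})
console.log(key.id, key.api_key)
```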
-    get_lifecycle<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmGetLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_lifecycle<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_lifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetLifecycle, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_lifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetLifecycle, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getLifecycle<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmGetLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getLifecycle<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getLifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetLifecycle, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getLifecycle<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetLifecycle, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmGetStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_stats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getStats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmGetStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getStats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetStats, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getStats<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetStats, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_status<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmGetStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_status<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_status<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_status<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getStatus<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmGetStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getStatus<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getStatus<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getStatus<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmGetStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_lifecycle<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmPutLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    put_lifecycle<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_lifecycle<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SlmPutLifecycle, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    put_lifecycle<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SlmPutLifecycle, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putLifecycle<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmPutLifecycle, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    putLifecycle<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putLifecycle<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SlmPutLifecycle, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    putLifecycle<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SlmPutLifecycle, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    start<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmStart, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    start<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    start<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmStart, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    start<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmStart, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stop<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SlmStop, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    stop<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stop<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmStop, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    stop<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SlmStop, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  snapshot: {
-    cleanup_repository<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotCleanupRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    cleanup_repository<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    cleanup_repository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCleanupRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    cleanup_repository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCleanupRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    cleanupRepository<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotCleanupRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    cleanupRepository<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    cleanupRepository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCleanupRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    cleanupRepository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCleanupRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    create<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotCreate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    create<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    create<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCreate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    create<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCreate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    create_repository<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotCreateRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
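The `slm` typings deleted above covered snapshot lifecycle policies. A sketch of registering a policy through them, inside an async function; the policy id, schedule, and repository name are hypothetical:

```ts
// Hypothetical policy: snapshot 'logs-*' nightly into 'my-repo' with basic retention.
await client.slm.putLifecycle({
  policy_id: 'nightly-snapshots',
  body: {
    schedule: '0 30 1 * * ?',        // SLM schedules use cron expressions
    name: '<nightly-snap-{now/d}>',
    repository: 'my-repo',
    config: { indices: ['logs-*'] },
    retention: { expire_after: '30d', min_count: 5, max_count: 50 }
  }
})
```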
-    create_repository<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    create_repository<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCreateRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    create_repository<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCreateRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    createRepository<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotCreateRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    createRepository<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    createRepository<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCreateRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    createRepository<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotCreateRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotDelete, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotDelete, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotDelete, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_repository<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotDeleteRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_repository<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_repository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotDeleteRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_repository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotDeleteRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteRepository<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotDeleteRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteRepository<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteRepository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotDeleteRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteRepository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotDeleteRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotGet, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotGet, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotGet, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_repository<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotGetRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_repository<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_repository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotGetRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_repository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotGetRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRepository<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotGetRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getRepository<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRepository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotGetRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getRepository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotGetRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    restore<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotRestore, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    restore<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    restore<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotRestore, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    restore<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotRestore, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    status<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotStatus, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    status<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    status<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotStatus, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    status<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotStatus, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    verify_repository<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotVerifyRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    verify_repository<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    verify_repository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotVerifyRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    verify_repository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotVerifyRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    verifyRepository<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SnapshotVerifyRepository, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    verifyRepository<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    verifyRepository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotVerifyRepository, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    verifyRepository<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SnapshotVerifyRepository, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  sql: {
-    clear_cursor<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SqlClearCursor, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    clear_cursor<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clear_cursor<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SqlClearCursor, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
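The `snapshot` namespace removed above paired repository management with snapshot creation. A sketch of both steps through these typings, inside an async function; the repository path and names are hypothetical:

```ts
// Hypothetical filesystem repository plus a one-off snapshot of 'logs-*'.
await client.snapshot.createRepository({
  repository: 'my-repo',
  body: { type: 'fs', settings: { location: '/mnt/backups' } }
})
const snap = await client.snapshot.create({
  repository: 'my-repo',
  snapshot: 'snap-1',
  wait_for_completion: true,
  body: { indices: 'logs-*' }
})
console.log(snap.body.snapshot)
```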
-    clear_cursor<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SqlClearCursor, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCursor<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SqlClearCursor, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    clearCursor<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCursor<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SqlClearCursor, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    clearCursor<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SqlClearCursor, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SqlQuery, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SqlQuery, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    query<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SqlQuery, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    translate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.SqlTranslate, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    translate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    translate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SqlTranslate, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    translate<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.SqlTranslate, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  ssl: {
-    certificates<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.SslCertificates, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    certificates<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    certificates<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SslCertificates, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    certificates<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.SslCertificates, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  tasks: {
-    cancel<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.TasksCancel, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    cancel<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    cancel<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TasksCancel, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    cancel<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TasksCancel, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.TasksGet, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TasksGet, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TasksGet, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
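The `sql` typings deleted above accepted a query in the request body and handed back columns, rows, and an optional pagination cursor that these same typings let you release. A sketch inside an async function; the index name is hypothetical:

```ts
// Hypothetical query against a 'logs-*' index pattern.
const { body: page } = await client.sql.query({
  body: { query: 'SELECT * FROM "logs-*" LIMIT 10' }
})
console.log(page.columns, page.rows)
// Paginated result sets return a cursor that should be released when done:
if (page.cursor) {
  await client.sql.clearCursor({ body: { cursor: page.cursor } })
}
```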
-    list<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.TasksList, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    list<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    list<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TasksList, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    list<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TasksList, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  }
-  termvectors<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params?: RequestParams.Termvectors, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-  termvectors<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  termvectors<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.Termvectors, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  termvectors<TResponse = Record<string, any>, TRequestBody extends RequestBody = Record<string, any>, TContext = unknown>(params: RequestParams.Termvectors, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-  transform: {
-    delete_transform<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.TransformDeleteTransform, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    delete_transform<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_transform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TransformDeleteTransform, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    delete_transform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TransformDeleteTransform, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteTransform<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.TransformDeleteTransform, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    deleteTransform<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteTransform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TransformDeleteTransform, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    deleteTransform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TransformDeleteTransform, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_transform<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.TransformGetTransform, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_transform<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_transform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TransformGetTransform, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_transform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TransformGetTransform, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTransform<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.TransformGetTransform, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    getTransform<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTransform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TransformGetTransform, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    getTransform<TResponse = Record<string, any>, TContext = unknown>(params: RequestParams.TransformGetTransform, options: TransportRequestOptions, callback: callbackFn<TResponse, TContext>): TransportRequestCallback
-    get_transform_stats<TResponse = Record<string, any>, TContext = unknown>(params?: RequestParams.TransformGetTransformStats, options?: TransportRequestOptions): TransportRequestPromise<ApiResponse<TResponse, TContext>>
-    get_transform_stats<TResponse = Record<string, any>, TContext = unknown>(callback: callbackFn<TResponse, TContext>): TransportRequestCallback
TransportRequestCallback - get_transform_stats, TContext = unknown>(params: RequestParams.TransformGetTransformStats, callback: callbackFn): TransportRequestCallback - get_transform_stats, TContext = unknown>(params: RequestParams.TransformGetTransformStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getTransformStats, TContext = unknown>(params?: RequestParams.TransformGetTransformStats, options?: TransportRequestOptions): TransportRequestPromise> - getTransformStats, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getTransformStats, TContext = unknown>(params: RequestParams.TransformGetTransformStats, callback: callbackFn): TransportRequestCallback - getTransformStats, TContext = unknown>(params: RequestParams.TransformGetTransformStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - preview_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.TransformPreviewTransform, options?: TransportRequestOptions): TransportRequestPromise> - preview_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - preview_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformPreviewTransform, callback: callbackFn): TransportRequestCallback - preview_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformPreviewTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - previewTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.TransformPreviewTransform, options?: TransportRequestOptions): TransportRequestPromise> - previewTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - previewTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformPreviewTransform, callback: callbackFn): TransportRequestCallback - previewTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformPreviewTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.TransformPutTransform, options?: TransportRequestOptions): TransportRequestPromise> - put_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformPutTransform, callback: callbackFn): TransportRequestCallback - put_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformPutTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.TransformPutTransform, options?: TransportRequestOptions): TransportRequestPromise> - putTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformPutTransform, callback: callbackFn): TransportRequestCallback - putTransform, TRequestBody extends 
RequestBody = Record, TContext = unknown>(params: RequestParams.TransformPutTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - start_transform, TContext = unknown>(params?: RequestParams.TransformStartTransform, options?: TransportRequestOptions): TransportRequestPromise> - start_transform, TContext = unknown>(callback: callbackFn): TransportRequestCallback - start_transform, TContext = unknown>(params: RequestParams.TransformStartTransform, callback: callbackFn): TransportRequestCallback - start_transform, TContext = unknown>(params: RequestParams.TransformStartTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - startTransform, TContext = unknown>(params?: RequestParams.TransformStartTransform, options?: TransportRequestOptions): TransportRequestPromise> - startTransform, TContext = unknown>(callback: callbackFn): TransportRequestCallback - startTransform, TContext = unknown>(params: RequestParams.TransformStartTransform, callback: callbackFn): TransportRequestCallback - startTransform, TContext = unknown>(params: RequestParams.TransformStartTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stop_transform, TContext = unknown>(params?: RequestParams.TransformStopTransform, options?: TransportRequestOptions): TransportRequestPromise> - stop_transform, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stop_transform, TContext = unknown>(params: RequestParams.TransformStopTransform, callback: callbackFn): TransportRequestCallback - stop_transform, TContext = unknown>(params: RequestParams.TransformStopTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stopTransform, TContext = unknown>(params?: RequestParams.TransformStopTransform, options?: TransportRequestOptions): TransportRequestPromise> - stopTransform, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stopTransform, TContext = unknown>(params: RequestParams.TransformStopTransform, callback: callbackFn): TransportRequestCallback - stopTransform, TContext = unknown>(params: RequestParams.TransformStopTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.TransformUpdateTransform, options?: TransportRequestOptions): TransportRequestPromise> - update_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - update_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformUpdateTransform, callback: callbackFn): TransportRequestCallback - update_transform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformUpdateTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.TransformUpdateTransform, options?: TransportRequestOptions): TransportRequestPromise> - updateTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - updateTransform, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.TransformUpdateTransform, callback: callbackFn): TransportRequestCallback - updateTransform, TRequestBody extends 
RequestBody = Record, TContext = unknown>(params: RequestParams.TransformUpdateTransform, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - update, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.Update, options?: TransportRequestOptions): TransportRequestPromise> - update, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - update, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.Update, callback: callbackFn): TransportRequestCallback - update, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.Update, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_by_query, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.UpdateByQuery, options?: TransportRequestOptions): TransportRequestPromise> - update_by_query, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - update_by_query, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.UpdateByQuery, callback: callbackFn): TransportRequestCallback - update_by_query, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.UpdateByQuery, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateByQuery, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.UpdateByQuery, options?: TransportRequestOptions): TransportRequestPromise> - updateByQuery, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - updateByQuery, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.UpdateByQuery, callback: callbackFn): TransportRequestCallback - updateByQuery, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.UpdateByQuery, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - update_by_query_rethrottle, TContext = unknown>(params?: RequestParams.UpdateByQueryRethrottle, options?: TransportRequestOptions): TransportRequestPromise> - update_by_query_rethrottle, TContext = unknown>(callback: callbackFn): TransportRequestCallback - update_by_query_rethrottle, TContext = unknown>(params: RequestParams.UpdateByQueryRethrottle, callback: callbackFn): TransportRequestCallback - update_by_query_rethrottle, TContext = unknown>(params: RequestParams.UpdateByQueryRethrottle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - updateByQueryRethrottle, TContext = unknown>(params?: RequestParams.UpdateByQueryRethrottle, options?: TransportRequestOptions): TransportRequestPromise> - updateByQueryRethrottle, TContext = unknown>(callback: callbackFn): TransportRequestCallback - updateByQueryRethrottle, TContext = unknown>(params: RequestParams.UpdateByQueryRethrottle, callback: callbackFn): TransportRequestCallback - updateByQueryRethrottle, TContext = unknown>(params: RequestParams.UpdateByQueryRethrottle, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - watcher: { - ack_watch, TContext = unknown>(params?: RequestParams.WatcherAckWatch, options?: TransportRequestOptions): TransportRequestPromise> - ack_watch, TContext = unknown>(callback: callbackFn): TransportRequestCallback - ack_watch, TContext = 
unknown>(params: RequestParams.WatcherAckWatch, callback: callbackFn): TransportRequestCallback - ack_watch, TContext = unknown>(params: RequestParams.WatcherAckWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - ackWatch, TContext = unknown>(params?: RequestParams.WatcherAckWatch, options?: TransportRequestOptions): TransportRequestPromise> - ackWatch, TContext = unknown>(callback: callbackFn): TransportRequestCallback - ackWatch, TContext = unknown>(params: RequestParams.WatcherAckWatch, callback: callbackFn): TransportRequestCallback - ackWatch, TContext = unknown>(params: RequestParams.WatcherAckWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - activate_watch, TContext = unknown>(params?: RequestParams.WatcherActivateWatch, options?: TransportRequestOptions): TransportRequestPromise> - activate_watch, TContext = unknown>(callback: callbackFn): TransportRequestCallback - activate_watch, TContext = unknown>(params: RequestParams.WatcherActivateWatch, callback: callbackFn): TransportRequestCallback - activate_watch, TContext = unknown>(params: RequestParams.WatcherActivateWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - activateWatch, TContext = unknown>(params?: RequestParams.WatcherActivateWatch, options?: TransportRequestOptions): TransportRequestPromise> - activateWatch, TContext = unknown>(callback: callbackFn): TransportRequestCallback - activateWatch, TContext = unknown>(params: RequestParams.WatcherActivateWatch, callback: callbackFn): TransportRequestCallback - activateWatch, TContext = unknown>(params: RequestParams.WatcherActivateWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deactivate_watch, TContext = unknown>(params?: RequestParams.WatcherDeactivateWatch, options?: TransportRequestOptions): TransportRequestPromise> - deactivate_watch, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deactivate_watch, TContext = unknown>(params: RequestParams.WatcherDeactivateWatch, callback: callbackFn): TransportRequestCallback - deactivate_watch, TContext = unknown>(params: RequestParams.WatcherDeactivateWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deactivateWatch, TContext = unknown>(params?: RequestParams.WatcherDeactivateWatch, options?: TransportRequestOptions): TransportRequestPromise> - deactivateWatch, TContext = unknown>(callback: callbackFn): TransportRequestCallback - deactivateWatch, TContext = unknown>(params: RequestParams.WatcherDeactivateWatch, callback: callbackFn): TransportRequestCallback - deactivateWatch, TContext = unknown>(params: RequestParams.WatcherDeactivateWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - delete_watch, TContext = unknown>(params?: RequestParams.WatcherDeleteWatch, options?: TransportRequestOptions): TransportRequestPromise> - delete_watch, TContext = unknown>(callback: callbackFn): TransportRequestCallback - delete_watch, TContext = unknown>(params: RequestParams.WatcherDeleteWatch, callback: callbackFn): TransportRequestCallback - delete_watch, TContext = unknown>(params: RequestParams.WatcherDeleteWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - deleteWatch, TContext = unknown>(params?: RequestParams.WatcherDeleteWatch, options?: TransportRequestOptions): TransportRequestPromise> - deleteWatch, TContext = unknown>(callback: 
callbackFn): TransportRequestCallback - deleteWatch, TContext = unknown>(params: RequestParams.WatcherDeleteWatch, callback: callbackFn): TransportRequestCallback - deleteWatch, TContext = unknown>(params: RequestParams.WatcherDeleteWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - execute_watch, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.WatcherExecuteWatch, options?: TransportRequestOptions): TransportRequestPromise> - execute_watch, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - execute_watch, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.WatcherExecuteWatch, callback: callbackFn): TransportRequestCallback - execute_watch, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.WatcherExecuteWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - executeWatch, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.WatcherExecuteWatch, options?: TransportRequestOptions): TransportRequestPromise> - executeWatch, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - executeWatch, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.WatcherExecuteWatch, callback: callbackFn): TransportRequestCallback - executeWatch, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.WatcherExecuteWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - get_watch, TContext = unknown>(params?: RequestParams.WatcherGetWatch, options?: TransportRequestOptions): TransportRequestPromise> - get_watch, TContext = unknown>(callback: callbackFn): TransportRequestCallback - get_watch, TContext = unknown>(params: RequestParams.WatcherGetWatch, callback: callbackFn): TransportRequestCallback - get_watch, TContext = unknown>(params: RequestParams.WatcherGetWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - getWatch, TContext = unknown>(params?: RequestParams.WatcherGetWatch, options?: TransportRequestOptions): TransportRequestPromise> - getWatch, TContext = unknown>(callback: callbackFn): TransportRequestCallback - getWatch, TContext = unknown>(params: RequestParams.WatcherGetWatch, callback: callbackFn): TransportRequestCallback - getWatch, TContext = unknown>(params: RequestParams.WatcherGetWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - put_watch, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.WatcherPutWatch, options?: TransportRequestOptions): TransportRequestPromise> - put_watch, TRequestBody extends RequestBody = Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - put_watch, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.WatcherPutWatch, callback: callbackFn): TransportRequestCallback - put_watch, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.WatcherPutWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - putWatch, TRequestBody extends RequestBody = Record, TContext = unknown>(params?: RequestParams.WatcherPutWatch, options?: TransportRequestOptions): TransportRequestPromise> - putWatch, TRequestBody extends RequestBody = 
Record, TContext = unknown>(callback: callbackFn): TransportRequestCallback - putWatch, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.WatcherPutWatch, callback: callbackFn): TransportRequestCallback - putWatch, TRequestBody extends RequestBody = Record, TContext = unknown>(params: RequestParams.WatcherPutWatch, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - start, TContext = unknown>(params?: RequestParams.WatcherStart, options?: TransportRequestOptions): TransportRequestPromise> - start, TContext = unknown>(callback: callbackFn): TransportRequestCallback - start, TContext = unknown>(params: RequestParams.WatcherStart, callback: callbackFn): TransportRequestCallback - start, TContext = unknown>(params: RequestParams.WatcherStart, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stats, TContext = unknown>(params?: RequestParams.WatcherStats, options?: TransportRequestOptions): TransportRequestPromise> - stats, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stats, TContext = unknown>(params: RequestParams.WatcherStats, callback: callbackFn): TransportRequestCallback - stats, TContext = unknown>(params: RequestParams.WatcherStats, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - stop, TContext = unknown>(params?: RequestParams.WatcherStop, options?: TransportRequestOptions): TransportRequestPromise> - stop, TContext = unknown>(callback: callbackFn): TransportRequestCallback - stop, TContext = unknown>(params: RequestParams.WatcherStop, callback: callbackFn): TransportRequestCallback - stop, TContext = unknown>(params: RequestParams.WatcherStop, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - xpack: { - info, TContext = unknown>(params?: RequestParams.XpackInfo, options?: TransportRequestOptions): TransportRequestPromise> - info, TContext = unknown>(callback: callbackFn): TransportRequestCallback - info, TContext = unknown>(params: RequestParams.XpackInfo, callback: callbackFn): TransportRequestCallback - info, TContext = unknown>(params: RequestParams.XpackInfo, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - usage, TContext = unknown>(params?: RequestParams.XpackUsage, options?: TransportRequestOptions): TransportRequestPromise> - usage, TContext = unknown>(callback: callbackFn): TransportRequestCallback - usage, TContext = unknown>(params: RequestParams.XpackUsage, callback: callbackFn): TransportRequestCallback - usage, TContext = unknown>(params: RequestParams.XpackUsage, options: TransportRequestOptions, callback: callbackFn): TransportRequestCallback - } - /* /GENERATED */ -} - -// We must redeclare the EventEmitter class so we can provide -// better type definitions for our events, otherwise the default -// signature is `(event: string | symbol, listener: (...args: any[]) => void): this;` -declare class EventEmitter { - addListener(event: string | symbol, listener: (...args: any[]) => void): this; - on(event: 'request', listener: (err: ApiError, meta: RequestEvent) => void): this; - on(event: 'response', listener: (err: ApiError, meta: RequestEvent) => void): this; - on(event: 'sniff', listener: (err: ApiError, meta: RequestEvent) => void): this; - on(event: 'resurrect', listener: (err: null, meta: ResurrectEvent) => void): this; - once(event: 'request', listener: (err: ApiError, meta: RequestEvent) => void): this; - once(event: 'response', 
listener: (err: ApiError, meta: RequestEvent) => void): this; - once(event: 'sniff', listener: (err: ApiError, meta: RequestEvent) => void): this; - once(event: 'resurrect', listener: (err: null, meta: ResurrectEvent) => void): this; - removeListener(event: string | symbol, listener: (...args: any[]) => void): this; - off(event: string | symbol, listener: (...args: any[]) => void): this; - removeAllListeners(event?: string | symbol): this; - setMaxListeners(n: number): this; - getMaxListeners(): number; - listeners(event: string | symbol): Function[]; - rawListeners(event: string | symbol): Function[]; - emit(event: string | symbol, ...args: any[]): boolean; - listenerCount(type: string | symbol): number; - // Added in Node 6... - prependListener(event: string | symbol, listener: (...args: any[]) => void): this; - prependOnceListener(event: string | symbol, listener: (...args: any[]) => void): this; - eventNames(): Array; -} - -declare const events: { - RESPONSE: string; - REQUEST: string; - SNIFF: string; - RESURRECT: string; -}; - -export { - Client, - Transport, - ConnectionPool, - BaseConnectionPool, - CloudConnectionPool, - Connection, - Serializer, - events, - errors, - ApiError, - ApiResponse, - RequestEvent, - ResurrectEvent, - RequestParams, - ClientOptions, - NodeOptions, - ClientExtendsCallbackOptions -}; +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import Client from './lib/client' +import SniffingTransport from './lib/sniffingTransport' + +export * from '@elastic/transport' +export * as estypes from './lib/api/types' +export { Client, SniffingTransport } +export type { ClientOptions, NodeOptions } from './lib/client' +export * as helpers from './lib/helpers' diff --git a/index.js b/index.js index 07a737fc6..eb12ae5f4 100644 --- a/index.js +++ b/index.js @@ -1,270 +1,41 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ 'use strict' -const nodeMajor = Number(process.versions.node.split('.')[0]) - -const { EventEmitter } = require('events') -const { URL } = require('url') -const debug = require('debug')('elasticsearch') -const Transport = require('./lib/Transport') -const Connection = require('./lib/Connection') -const { ConnectionPool, CloudConnectionPool } = require('./lib/pool') -// Helpers works only in Node.js >= 10 -const Helpers = nodeMajor < 10 ? 
null : require('./lib/Helpers')
-const Serializer = require('./lib/Serializer')
-const errors = require('./lib/errors')
-const { ConfigurationError } = errors
-
-const kInitialOptions = Symbol('elasticsearchjs-initial-options')
-const kChild = Symbol('elasticsearchjs-child')
-const kExtensions = Symbol('elasticsearchjs-extensions')
-
-const buildApi = require('./api')
-
-class Client extends EventEmitter {
-  constructor (opts = {}) {
-    super()
-    if (opts.cloud) {
-      const { id, username, password } = opts.cloud
-      // the cloud id is `cluster-name:base64encodedurl`
-      // the url is a string divided by two '$' characters; the first part is the cloud url,
-      // the second the elasticsearch instance, the third the kibana instance
-      const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$')
-
-      // TODO: remove username and password here in 8
-      if (username && password) {
-        opts.auth = Object.assign({}, opts.auth, { username, password })
-      }
-      opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}`
-
-      // Cloud has better performance with compression enabled
-      // see https://github.com/elastic/elasticsearch-py/pull/704.
-      // So unless the user specifies otherwise, we enable compression.
-      if (opts.compression == null) opts.compression = 'gzip'
-      if (opts.suggestCompression == null) opts.suggestCompression = true
-      if (opts.ssl == null ||
-          (opts.ssl && opts.ssl.secureProtocol == null)) {
-        opts.ssl = opts.ssl || {}
-        opts.ssl.secureProtocol = 'TLSv1_2_method'
-      }
-    }
-
-    if (!opts.node && !opts.nodes) {
-      throw new ConfigurationError('Missing node(s) option')
-    }
-
-    const checkAuth = getAuth(opts.node || opts.nodes)
-    if (checkAuth && checkAuth.username && checkAuth.password) {
-      opts.auth = Object.assign({}, opts.auth, { username: checkAuth.username, password: checkAuth.password })
-    }
-
-    const options = Object.assign({}, {
-      Connection,
-      Transport,
-      Serializer,
-      ConnectionPool: opts.cloud ?
CloudConnectionPool : ConnectionPool,
-      maxRetries: 3,
-      requestTimeout: 30000,
-      pingTimeout: 3000,
-      sniffInterval: false,
-      sniffOnStart: false,
-      sniffEndpoint: '_nodes/_all/http',
-      sniffOnConnectionFault: false,
-      resurrectStrategy: 'ping',
-      suggestCompression: false,
-      compression: false,
-      ssl: null,
-      agent: null,
-      headers: {},
-      nodeFilter: null,
-      nodeSelector: 'round-robin',
-      generateRequestId: null,
-      name: 'elasticsearch-js',
-      auth: null,
-      opaqueIdPrefix: null
-    }, opts)
-
-    this[kInitialOptions] = options
-    this[kExtensions] = []
-
-    this.name = options.name
-    this.serializer = new options.Serializer()
-    this.connectionPool = new options.ConnectionPool({
-      pingTimeout: options.pingTimeout,
-      resurrectStrategy: options.resurrectStrategy,
-      ssl: options.ssl,
-      agent: options.agent,
-      Connection: options.Connection,
-      auth: options.auth,
-      emit: this.emit.bind(this),
-      sniffEnabled: options.sniffInterval !== false ||
-                    options.sniffOnStart !== false ||
-                    options.sniffOnConnectionFault !== false
-    })
-
-    // Add the connections before initializing the Transport
-    if (opts[kChild] !== true) {
-      this.connectionPool.addConnection(options.node || options.nodes)
-    }
-
-    this.transport = new options.Transport({
-      emit: this.emit.bind(this),
-      connectionPool: this.connectionPool,
-      serializer: this.serializer,
-      maxRetries: options.maxRetries,
-      requestTimeout: options.requestTimeout,
-      sniffInterval: options.sniffInterval,
-      sniffOnStart: options.sniffOnStart,
-      sniffOnConnectionFault: options.sniffOnConnectionFault,
-      sniffEndpoint: options.sniffEndpoint,
-      suggestCompression: options.suggestCompression,
-      compression: options.compression,
-      headers: options.headers,
-      nodeFilter: options.nodeFilter,
-      nodeSelector: options.nodeSelector,
-      generateRequestId: options.generateRequestId,
-      name: options.name,
-      opaqueIdPrefix: options.opaqueIdPrefix
-    })
-
-    if (Helpers !== null) {
-      this.helpers = new Helpers({ client: this, maxRetries: options.maxRetries })
-    }
-
-    const apis = buildApi({
-      makeRequest: this.transport.request.bind(this.transport),
-      result: { body: null, statusCode: null, headers: null, warnings: null },
-      ConfigurationError
-    })
-
-    Object.keys(apis).forEach(api => {
-      this[api] = apis[api]
-    })
-  }
-
-  extend (name, opts, fn) {
-    if (typeof opts === 'function') {
-      fn = opts
-      opts = {}
-    }
-
-    var [namespace, method] = name.split('.')
-    if (method == null) {
-      method = namespace
-      namespace = null
-    }
-
-    if (namespace != null) {
-      if (this[namespace] != null && this[namespace][method] != null && opts.force !== true) {
-        throw new Error(`The method "${method}" already exists on namespace "${namespace}"`)
-      }
-
-      this[namespace] = this[namespace] || {}
-      this[namespace][method] = fn({
-        makeRequest: this.transport.request.bind(this.transport),
-        result: { body: null, statusCode: null, headers: null, warnings: null },
-        ConfigurationError
-      })
-    } else {
-      if (this[method] != null && opts.force !== true) {
-        throw new Error(`The method "${method}" already exists`)
-      }
-
-      this[method] = fn({
-        makeRequest: this.transport.request.bind(this.transport),
-        result: { body: null, statusCode: null, headers: null, warnings: null },
-        ConfigurationError
-      })
-    }
-
-    this[kExtensions].push({ name, opts, fn })
-  }
-
-  child (opts) {
-    // Merge the new options with the initial ones
-    const initialOptions = Object.assign({}, this[kInitialOptions], opts)
-    // Tell the client that we are creating a child client
-    initialOptions[kChild] = true
-
-    const client = new
Client(initialOptions) - // Reuse the same connection pool - client.connectionPool = this.connectionPool - client.transport.connectionPool = this.connectionPool - // Share event listener - const emitter = this.emit.bind(this) - client.emit = emitter - client.connectionPool.emit = emitter - client.transport.emit = emitter - client.on = this.on.bind(this) - // Add parent extensions - this[kExtensions].forEach(({ name, opts, fn }) => { - client.extend(name, opts, fn) - }) - return client - } - - close (callback) { - if (callback == null) { - return new Promise((resolve, reject) => { - this.close(resolve) - }) - } - debug('Closing the client') - this.connectionPool.empty(callback) - } -} - -function getAuth (node) { - if (Array.isArray(node)) { - for (const url of node) { - const auth = getUsernameAndPassword(url) - if (auth.username !== '' && auth.password !== '') { - return auth - } - } - - return null - } - - const auth = getUsernameAndPassword(node) - if (auth.username !== '' && auth.password !== '') { - return auth - } - - return null - - function getUsernameAndPassword (node) { - if (typeof node === 'string') { - const { username, password } = new URL(node) - return { - username: decodeURIComponent(username), - password: decodeURIComponent(password) - } - } else if (node.url instanceof URL) { - return { - username: decodeURIComponent(node.url.username), - password: decodeURIComponent(node.url.password) - } - } - } -} +const { + Diagnostic, + Transport, + WeightedConnectionPool, + ClusterConnectionPool, + BaseConnectionPool, + CloudConnectionPool, + BaseConnection, + HttpConnection, + UndiciConnection, + Serializer, + errors, + events +} = require('@elastic/transport') -const events = { - RESPONSE: 'response', - REQUEST: 'request', - SNIFF: 'sniff', - RESURRECT: 'resurrect' -} +const { default: Client } = require('./lib/client') +const { default: SniffingTransport } = require('./lib/sniffingTransport') module.exports = { Client, + SniffingTransport, + Diagnostic, Transport, - ConnectionPool, - Connection, + WeightedConnectionPool, + ClusterConnectionPool, + BaseConnectionPool, + CloudConnectionPool, + BaseConnection, + HttpConnection, + UndiciConnection, Serializer, - events, - errors + errors, + events } diff --git a/lib/Connection.d.ts b/lib/Connection.d.ts deleted file mode 100644 index dda06aec3..000000000 --- a/lib/Connection.d.ts +++ /dev/null @@ -1,80 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
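The deleted constructor above decodes cloud ids inline; as a quick illustration, here is the same decoding step in isolation (a minimal sketch, where the cluster name, host, and uuids are all made-up values):

```js
// Minimal sketch of the cloud id decoding used above: the part after the
// first ':' is base64 and decodes to `host$es-uuid$kibana-uuid`.
const id = 'my-cluster:' + Buffer.from('us-east-1.aws.found.io$abc123$def456').toString('base64')

const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$')
const node = `https://${cloudUrls[1]}.${cloudUrls[0]}`

console.log(node) // https://abc123.us-east-1.aws.found.io
```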
-// See the LICENSE file in the project root for more information - -/// - -import { URL } from 'url'; -import { inspect, InspectOptions } from 'util' -import { Readable as ReadableStream } from 'stream'; -import { ApiKeyAuth, BasicAuth } from './pool' -import * as http from 'http' -import { ConnectionOptions as TlsConnectionOptions } from 'tls' - -export declare type agentFn = () => any; - -interface ConnectionOptions { - url: URL; - ssl?: TlsConnectionOptions; - id?: string; - headers?: Record; - agent?: AgentOptions | agentFn; - status?: string; - roles?: ConnectionRoles; - auth?: BasicAuth | ApiKeyAuth; -} - -interface ConnectionRoles { - master?: boolean - data?: boolean - ingest?: boolean - ml?: boolean -} - -interface RequestOptions extends http.ClientRequestArgs { - asStream?: boolean; - body?: string | Buffer | ReadableStream; - querystring?: string; -} - -export interface AgentOptions { - keepAlive?: boolean; - keepAliveMsecs?: number; - maxSockets?: number; - maxFreeSockets?: number; -} - -export default class Connection { - static statuses: { - ALIVE: string; - DEAD: string; - }; - static roles: { - MASTER: string; - DATA: string; - INGEST: string; - ML: string; - }; - url: URL - ssl: TlsConnectionOptions | null - id: string - headers: Record - status: string - roles: ConnectionRoles - deadCount: number - resurrectTimeout: number - makeRequest: any - _openRequests: number - _status: string - _agent: http.Agent - constructor(opts?: ConnectionOptions) - request(params: RequestOptions, callback: (err: Error | null, response: http.IncomingMessage | null) => void): http.ClientRequest - close(): Connection - setRole(role: string, enabled: boolean): Connection - buildRequestObject(params: any): http.ClientRequestArgs - // @ts-ignore - [inspect.custom](object: any, options: InspectOptions): string - toJSON(): any -} - -export {}; diff --git a/lib/Connection.js b/lib/Connection.js deleted file mode 100644 index bfce0fa15..000000000 --- a/lib/Connection.js +++ /dev/null @@ -1,316 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const assert = require('assert') -const { inspect } = require('util') -const http = require('http') -const https = require('https') -const debug = require('debug')('elasticsearch') -const decompressResponse = require('decompress-response') -const pump = require('pump') -const INVALID_PATH_REGEX = /[^\u0021-\u00ff]/ -const { - ConnectionError, - RequestAbortedError, - TimeoutError, - ConfigurationError -} = require('./errors') - -class Connection { - constructor (opts = {}) { - this.url = opts.url - this.ssl = opts.ssl || null - this.id = opts.id || stripAuth(opts.url.href) - this.headers = prepareHeaders(opts.headers, opts.auth) - this.deadCount = 0 - this.resurrectTimeout = 0 - - this._openRequests = 0 - this._status = opts.status || Connection.statuses.ALIVE - this.roles = Object.assign({}, defaultRoles, opts.roles) - - if (!['http:', 'https:'].includes(this.url.protocol)) { - throw new ConfigurationError(`Invalid protocol: '${this.url.protocol}'`) - } - - if (typeof opts.agent === 'function') { - this.agent = opts.agent() - } else { - const keepAliveFalse = opts.agent && opts.agent.keepAlive === false - const agentOptions = Object.assign({}, { - keepAlive: true, - keepAliveMsecs: 1000, - maxSockets: keepAliveFalse ? 
Infinity : 256, - maxFreeSockets: 256 - }, opts.agent) - this.agent = this.url.protocol === 'http:' - ? new http.Agent(agentOptions) - : new https.Agent(Object.assign({}, agentOptions, this.ssl)) - } - - this.makeRequest = this.url.protocol === 'http:' - ? http.request - : https.request - } - - request (params, callback) { - this._openRequests++ - var ended = false - - const requestParams = this.buildRequestObject(params) - // https://github.com/nodejs/node/commit/b961d9fd83 - if (INVALID_PATH_REGEX.test(requestParams.path) === true) { - callback(new TypeError(`ERR_UNESCAPED_CHARACTERS: ${requestParams.path}`), null) - return { abort: () => {} } - } - - debug('Starting a new request', params) - const request = this.makeRequest(requestParams) - - // listen for the response event - // TODO: handle redirects? - request.on('response', response => { - if (ended === false) { - ended = true - this._openRequests-- - - if (params.asStream === true) { - callback(null, response) - } else { - callback(null, decompressResponse(response)) - } - } - }) - - // handles request timeout - request.on('timeout', () => { - if (ended === false) { - ended = true - this._openRequests-- - request.abort() - callback(new TimeoutError('Request timed out', params), null) - } - }) - - // handles request error - request.on('error', err => { - if (ended === false) { - ended = true - this._openRequests-- - callback(new ConnectionError(err.message), null) - } - }) - - // updates the ended state - request.on('abort', () => { - debug('Request aborted', params) - if (ended === false) { - ended = true - this._openRequests-- - callback(new RequestAbortedError(), null) - } - }) - - // Disables the Nagle algorithm - request.setNoDelay(true) - - // starts the request - if (isStream(params.body) === true) { - pump(params.body, request, err => { - /* istanbul ignore if */ - if (err != null && ended === false) { - ended = true - this._openRequests-- - callback(err, null) - } - }) - } else { - request.end(params.body) - } - - return request - } - - // TODO: write a better closing logic - close (callback = () => {}) { - debug('Closing connection', this.id) - if (this._openRequests > 0) { - setTimeout(() => this.close(callback), 1000) - } else { - this.agent.destroy() - callback() - } - } - - setRole (role, enabled) { - if (validRoles.indexOf(role) === -1) { - throw new ConfigurationError(`Unsupported role: '${role}'`) - } - if (typeof enabled !== 'boolean') { - throw new ConfigurationError('enabled should be a boolean') - } - - this.roles[role] = enabled - return this - } - - get status () { - return this._status - } - - set status (status) { - assert( - ~validStatuses.indexOf(status), - `Unsupported status: '${status}'` - ) - this._status = status - } - - buildRequestObject (params) { - const url = this.url - const request = { - protocol: url.protocol, - hostname: url.hostname[0] === '[' - ? url.hostname.slice(1, -1) - : url.hostname, - hash: url.hash, - search: url.search, - pathname: url.pathname, - path: '', - href: url.href, - origin: url.origin, - // https://github.com/elastic/elasticsearch-js/issues/843 - port: url.port !== '' ? url.port : undefined, - headers: this.headers, - agent: this.agent - } - - const paramsKeys = Object.keys(params) - for (var i = 0, len = paramsKeys.length; i < len; i++) { - var key = paramsKeys[i] - if (key === 'path') { - request.pathname = resolve(request.pathname, params[key]) - } else if (key === 'querystring' && !!params[key] === true) { - if (request.search === '') { - request.search = '?' 
+ params[key] - } else { - request.search += '&' + params[key] - } - } else if (key === 'headers') { - request.headers = Object.assign({}, request.headers, params.headers) - } else { - request[key] = params[key] - } - } - - request.path = request.pathname + request.search - - return request - } - - // Handles console.log and utils.inspect invocations. - // We want to hide `auth`, `agent` and `ssl` since they made - // the logs very hard to read. The user can still - // access them with `instance.agent` and `instance.ssl`. - [inspect.custom] (depth, options) { - const { - authorization, - ...headers - } = this.headers - - return { - url: stripAuth(this.url.toString()), - id: this.id, - headers, - deadCount: this.deadCount, - resurrectTimeout: this.resurrectTimeout, - _openRequests: this._openRequests, - status: this.status, - roles: this.roles - } - } - - toJSON () { - const { - authorization, - ...headers - } = this.headers - - return { - url: stripAuth(this.url.toString()), - id: this.id, - headers, - deadCount: this.deadCount, - resurrectTimeout: this.resurrectTimeout, - _openRequests: this._openRequests, - status: this.status, - roles: this.roles - } - } -} - -Connection.statuses = { - ALIVE: 'alive', - DEAD: 'dead' -} - -Connection.roles = { - MASTER: 'master', - DATA: 'data', - INGEST: 'ingest', - ML: 'ml' -} - -const defaultRoles = { - [Connection.roles.MASTER]: true, - [Connection.roles.DATA]: true, - [Connection.roles.INGEST]: true, - [Connection.roles.ML]: false -} - -const validStatuses = Object.keys(Connection.statuses) - .map(k => Connection.statuses[k]) -const validRoles = Object.keys(Connection.roles) - .map(k => Connection.roles[k]) - -function stripAuth (url) { - if (url.indexOf('@') === -1) return url - return url.slice(0, url.indexOf('//') + 2) + url.slice(url.indexOf('@') + 1) -} - -function isStream (obj) { - return obj != null && typeof obj.pipe === 'function' -} - -function resolve (host, path) { - const hostEndWithSlash = host[host.length - 1] === '/' - const pathStartsWithSlash = path[0] === '/' - - if (hostEndWithSlash === true && pathStartsWithSlash === true) { - return host + path.slice(1) - } else if (hostEndWithSlash !== pathStartsWithSlash) { - return host + path - } else { - return host + '/' + path - } -} - -function prepareHeaders (headers = {}, auth) { - if (auth != null && headers.authorization == null) { - if (auth.apiKey) { - if (typeof auth.apiKey === 'object') { - headers.authorization = 'ApiKey ' + Buffer.from(`${auth.apiKey.id}:${auth.apiKey.api_key}`).toString('base64') - } else { - headers.authorization = `ApiKey ${auth.apiKey}` - } - } else if (auth.username && auth.password) { - headers.authorization = 'Basic ' + Buffer.from(`${auth.username}:${auth.password}`).toString('base64') - } - } - return headers -} - -module.exports = Connection diff --git a/lib/Helpers.d.ts b/lib/Helpers.d.ts deleted file mode 100644 index e31839644..000000000 --- a/lib/Helpers.d.ts +++ /dev/null @@ -1,107 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
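The prepareHeaders function above maps both credential styles onto a single Authorization header; here is that logic as a standalone sketch:

```js
// Standalone sketch of the prepareHeaders logic above: basic auth and
// API key auth (object or string form) both become an Authorization header.
function authorizationHeader (auth) {
  if (auth.apiKey) {
    if (typeof auth.apiKey === 'object') {
      return 'ApiKey ' + Buffer.from(`${auth.apiKey.id}:${auth.apiKey.api_key}`).toString('base64')
    }
    return `ApiKey ${auth.apiKey}`
  }
  return 'Basic ' + Buffer.from(`${auth.username}:${auth.password}`).toString('base64')
}

console.log(authorizationHeader({ username: 'elastic', password: 'changeme' }))
// => Basic ZWxhc3RpYzpjaGFuZ2VtZQ==
```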
-// See the LICENSE file in the project root for more information - -import { Readable as ReadableStream } from 'stream' -import { TransportRequestOptions, ApiError, ApiResponse, RequestBody } from './Transport' -import { Search, Msearch, Bulk } from '../api/requestParams' - -export default class Helpers { - search>(params: Search, options?: TransportRequestOptions): Promise - scrollSearch, TRequestBody extends RequestBody = Record, TContext = unknown>(params: Search, options?: TransportRequestOptions): AsyncIterable> - scrollDocuments>(params: Search, options?: TransportRequestOptions): AsyncIterable - msearch(options?: MsearchHelperOptions): MsearchHelper - bulk(options: BulkHelperOptions): BulkHelper -} - -export interface ScrollSearchResponse, TContext = unknown> extends ApiResponse { - clear: () => Promise - documents: TDocument[] -} - -export interface BulkHelper extends Promise { - abort: () => BulkHelper -} - -export interface BulkStats { - total: number - failed: number - retry: number - successful: number - time: number - bytes: number - aborted: boolean -} - -interface IndexAction { - index: { - _index: string - [key: string]: any - } -} - -interface CreateAction { - create: { - _index: string - [key: string]: any - } -} - -interface UpdateActionOperation { - update: { - _index: string - [key: string]: any - } -} - -interface DeleteAction { - delete: { - _index: string - [key: string]: any - } -} - -type UpdateAction = [UpdateActionOperation, Record] -type Action = IndexAction | CreateAction | UpdateAction | DeleteAction -type Omit = Pick> - -export interface BulkHelperOptions extends Omit { - datasource: TDocument[] | Buffer | ReadableStream | AsyncIterator - onDocument: (doc: TDocument) => Action - flushBytes?: number - flushInterval?: number - concurrency?: number - retries?: number - wait?: number - onDrop?: (doc: OnDropDocument) => void - refreshOnCompletion?: boolean | string -} - -export interface OnDropDocument { - status: number - error: { - type: string, - reason: string, - caused_by: { - type: string, - reason: string - } - } - document: TDocument - retried: boolean -} - -export interface MsearchHelperOptions extends Omit { - operations?: number - flushInterval?: number - concurrency?: number - retries?: number - wait?: number -} - -declare type callbackFn = (err: ApiError, result: ApiResponse) => void; -export interface MsearchHelper extends Promise { - stop(error?: Error): void - search, TRequestBody extends RequestBody = Record, TContext = unknown>(header: Omit, body: TRequestBody): Promise> - search, TRequestBody extends RequestBody = Record, TContext = unknown>(header: Omit, body: TRequestBody, callback: callbackFn): void -} diff --git a/lib/Helpers.js b/lib/Helpers.js deleted file mode 100644 index a8f33c6c0..000000000 --- a/lib/Helpers.js +++ /dev/null @@ -1,721 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ - -const { Readable } = require('stream') -const { promisify } = require('util') -const { ResponseError, ConfigurationError } = require('./errors') - -const pImmediate = promisify(setImmediate) -const sleep = promisify(setTimeout) -const kClient = Symbol('elasticsearch-client') -const noop = () => {} - -class Helpers { - constructor (opts) { - this[kClient] = opts.client - this.maxRetries = opts.maxRetries - } - - /** - * Runs a search operation. 
The only difference between client.search and this utility
-   * is that we are only returning the hits to the user and not the full ES response.
-   * This helper automatically adds `filter_path=hits.hits._source` to the querystring,
-   * as it only needs the documents' source.
-   * @param {object} params - The Elasticsearch search parameters.
-   * @param {object} options - The client optional configuration for this request.
-   * @return {array} The documents that matched the request.
-   */
-  async search (params, options) {
-    appendFilterPath('hits.hits._source', params, true)
-    const { body } = await this[kClient].search(params, options)
-    if (body.hits && body.hits.hits) {
-      return body.hits.hits.map(d => d._source)
-    }
-    return []
-  }
-
-  /**
-   * Runs a scroll search operation. This function returns an async iterator, allowing
-   * the user to use a for await loop to get all the results of a given search.
-   * ```js
-   * for await (const result of client.helpers.scrollSearch({ params })) {
-   *   console.log(result)
-   * }
-   * ```
-   * Each result represents the entire body of a single scroll search request;
-   * if you just need to scroll the results, use scrollDocuments.
-   * This function automatically handles retries on the 429 status code.
-   * @param {object} params - The Elasticsearch search parameters.
-   * @param {object} options - The client optional configuration for this request.
-   * @return {iterator} the async iterator
-   */
-  async * scrollSearch (params, options = {}) {
-    // TODO: study scroll search slices
-    const wait = options.wait || 5000
-    const maxRetries = options.maxRetries || this.maxRetries
-    if (Array.isArray(options.ignore)) {
-      options.ignore.push(429)
-    } else {
-      options.ignore = [429]
-    }
-    params.scroll = params.scroll || '1m'
-    appendFilterPath('_scroll_id', params, false)
-    const { method, body, index, ...querystring } = params
-
-    let response = null
-    for (let i = 0; i < maxRetries; i++) {
-      response = await this[kClient].search(params, options)
-      if (response.statusCode !== 429) break
-      await sleep(wait)
-    }
-    if (response.statusCode === 429) {
-      throw new ResponseError(response)
-    }
-
-    let scroll_id = response.body._scroll_id
-    let stop = false
-    const clear = async () => {
-      stop = true
-      await this[kClient].clearScroll(
-        { body: { scroll_id } },
-        { ignore: [400] }
-      )
-    }
-
-    while (response.body.hits && response.body.hits.hits.length > 0) {
-      scroll_id = response.body._scroll_id
-      response.clear = clear
-      addDocumentsGetter(response)
-
-      yield response
-
-      if (!scroll_id || stop === true) {
-        break
-      }
-
-      for (let i = 0; i < maxRetries; i++) {
-        response = await this[kClient].scroll({
-          ...querystring,
-          body: { scroll_id }
-        }, options)
-        if (response.statusCode !== 429) break
-        await sleep(wait)
-      }
-      if (response.statusCode === 429) {
-        throw new ResponseError(response)
-      }
-    }
-  }
-
-  /**
-   * Runs a scroll search operation. This function returns an async iterator, allowing
-   * the user to use a for await loop to get all the documents of a given search.
-   * ```js
-   * for await (const document of client.helpers.scrollDocuments({ params })) {
-   *   console.log(document)
-   * }
-   * ```
-   * Each document is what you will find by running a scrollSearch and iterating on the hits array.
-   * This helper automatically adds `filter_path=hits.hits._source` to the querystring,
-   * as it only needs the documents' source.
-   * @param {object} params - The Elasticsearch search parameters.
-   * @param {object} options - The client optional configuration for this request.
-   * @return {iterator} the async iterator
-   */
-  async * scrollDocuments (params, options) {
-    appendFilterPath('hits.hits._source', params, true)
-    for await (const { documents } of this.scrollSearch(params)) {
-      for (const document of documents) {
-        yield document
-      }
-    }
-  }
-
-  /**
-   * Creates a msearch helper instance. Once you configure it, you can use the provided
-   * `search` method to add new searches to the queue.
-   * @param {object} options - The configuration of the msearch operations.
-   * @return {object} The possible operations to run.
-   */
-  msearch (options = {}) {
-    const client = this[kClient]
-    const {
-      operations = 5,
-      concurrency = 5,
-      flushInterval = 500,
-      retries = this.maxRetries,
-      wait = 5000,
-      ...msearchOptions
-    } = options
-
-    let stopReading = false
-    let stopError = null
-    let timeoutId = null
-    const operationsStream = new Readable({
-      objectMode: true,
-      read (size) {}
-    })
-
-    const p = iterate()
-    const helper = {
-      then (onFulfilled, onRejected) {
-        return p.then(onFulfilled, onRejected)
-      },
-      catch (onRejected) {
-        return p.catch(onRejected)
-      },
-      stop (error = null) {
-        if (stopReading === true) return
-        stopReading = true
-        stopError = error
-        operationsStream.push(null)
-      },
-      // TODO: support aborting a single search?
-      // NOTE: the validation checks are synchronous and the callback/promise will
-      // be resolved in the same tick. We might want to fix this in the future.
-      search (header, body, callback) {
-        if (stopReading === true) {
-          const error = stopError === null
-            ? new ConfigurationError('The msearch processor has been stopped')
-            : stopError
-          return callback ? callback(error, {}) : Promise.reject(error)
-        }
-
-        if (!(typeof header === 'object' && header !== null && !Array.isArray(header))) {
-          const error = new ConfigurationError('The header should be an object')
-          return callback ? callback(error, {}) : Promise.reject(error)
-        }
-
-        if (!(typeof body === 'object' && body !== null && !Array.isArray(body))) {
-          const error = new ConfigurationError('The body should be an object')
-          return callback ? callback(error, {}) : Promise.reject(error)
-        }
-
-        let promise = null
-        if (callback === undefined) {
-          let onFulfilled = null
-          let onRejected = null
-          promise = new Promise((resolve, reject) => {
-            onFulfilled = resolve
-            onRejected = reject
-          })
-          callback = function callback (err, result) {
-            err ? onRejected(err) : onFulfilled(result)
-          }
-        }
-
-        operationsStream.push([header, body, callback])
-
-        if (promise !== null) {
-          return promise
-        }
-      }
-    }
-
-    return helper
-
-    async function iterate () {
-      const { semaphore, finish } = buildSemaphore()
-      const msearchBody = []
-      const callbacks = []
-      let loadedOperations = 0
-
-      for await (const operation of operationsStream) {
-        clearTimeout(timeoutId)
-        loadedOperations += 1
-        msearchBody.push(operation[0], operation[1])
-        callbacks.push(operation[2])
-        if (loadedOperations >= operations) {
-          const send = await semaphore()
-          send(msearchBody.slice(), callbacks.slice())
-          msearchBody.length = 0
-          callbacks.length = 0
-          loadedOperations = 0
-        } else {
-          timeoutId = setTimeout(onFlushTimeout, flushInterval)
-        }
-      }
-
-      clearTimeout(timeoutId)
-      // In some cases the previous http call has not finished,
-      // or we didn't reach the operations threshold, so we force one last operation.
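For reference, a usage sketch of the msearch helper defined above; `client` is assumed to be an existing Client instance, and the index and query are placeholders:

```js
// Usage sketch: queued searches are batched into _msearch calls.
const m = client.helpers.msearch()

m.search(
  { index: 'stackoverflow' },
  { query: { match: { title: 'javascript' } } }
)
  .then(result => console.log(result.documents))
  .catch(err => console.error(err))

// signal that no more searches will be added
m.stop()
```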
-      if (loadedOperations > 0) {
-        const send = await semaphore()
-        send(msearchBody, callbacks)
-      }
-
-      await finish()
-
-      if (stopError !== null) {
-        throw stopError
-      }
-
-      async function onFlushTimeout () {
-        const msearchBodyCopy = msearchBody.slice()
-        const callbacksCopy = callbacks.slice()
-        msearchBody.length = 0
-        callbacks.length = 0
-        loadedOperations = 0
-        try {
-          const send = await semaphore()
-          send(msearchBodyCopy, callbacksCopy)
-        } catch (err) {
-          /* istanbul ignore next */
-          helper.stop(err)
-        }
-      }
-    }
-
-    // This function builds a semaphore using the concurrency
-    // options of the msearch helper. It is used inside the iterator
-    // to guarantee that no more than the allowed number of operations
-    // run at the same time.
-    // It returns a semaphore function which resolves in the next tick
-    // if we didn't reach the maximum concurrency yet, otherwise it returns
-    // a promise that resolves as soon as one of the running requests has finished.
-    // The semaphore function resolves to a send function, which will be used
-    // to send the actual msearch request.
-    // It also returns a finish function, which returns a promise that is resolved
-    // when there are no longer requests running.
-    function buildSemaphore () {
-      let resolveSemaphore = null
-      let resolveFinish = null
-      let running = 0
-
-      return { semaphore, finish }
-
-      function finish () {
-        return new Promise((resolve, reject) => {
-          if (running === 0) {
-            resolve()
-          } else {
-            resolveFinish = resolve
-          }
-        })
-      }
-
-      function semaphore () {
-        if (running < concurrency) {
-          return pImmediate(send)
-        } else {
-          return new Promise((resolve, reject) => {
-            resolveSemaphore = resolve
-          })
-        }
-      }
-
-      function send (msearchBody, callbacks) {
-        /* istanbul ignore if */
-        if (running >= concurrency) {
-          throw new Error('Max concurrency reached')
-        }
-        running += 1
-        msearchOperation(msearchBody, callbacks, () => {
-          running -= 1
-          if (resolveSemaphore) {
-            resolveSemaphore(send)
-            resolveSemaphore = null
-          } else if (resolveFinish && running === 0) {
-            resolveFinish()
-          }
-        })
-      }
-    }
-
-    function msearchOperation (msearchBody, callbacks, done) {
-      let retryCount = retries
-
-      // Instead of going full on async-await, which would make the code easier to read,
-      // we have decided to use callback style instead.
-      // This is because every time we use async/await, V8 will create multiple promises
-      // behind the scenes, making the code slightly slower.
-      tryMsearch(msearchBody, callbacks, retrySearch)
-      function retrySearch (msearchBody, callbacks) {
-        if (msearchBody.length > 0 && retryCount > 0) {
-          retryCount -= 1
-          setTimeout(tryMsearch, wait, msearchBody, callbacks, retrySearch)
-          return
-        }
-
-        done()
-      }
-
-      // This function never returns an error; if the msearch operation fails,
-      // the error is dispatched to all search executors.
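The semaphore described in the comment above hands its slot directly to the next waiter; the same pattern can be written as a small standalone limiter (a sketch of the idea, not the helper's actual API):

```js
// At most `concurrency` tasks run at once; a finishing task hands its
// slot to the next waiter instead of releasing and re-acquiring it.
function buildLimiter (concurrency) {
  let running = 0
  const waiting = []

  const acquire = () => new Promise(resolve => {
    if (running < concurrency) {
      running += 1
      resolve()
    } else {
      waiting.push(resolve)
    }
  })

  const release = () => {
    const next = waiting.shift()
    if (next) next() // slot is transferred, `running` stays constant
    else running -= 1
  }

  return async function run (task) {
    await acquire()
    try {
      return await task()
    } finally {
      release()
    }
  }
}

// const run = buildLimiter(5)
// await run(() => someAsyncOperation())
```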
-      function tryMsearch (msearchBody, callbacks, done) {
-        client.msearch(Object.assign({}, msearchOptions, { body: msearchBody }), (err, results) => {
-          const retryBody = []
-          const retryCallbacks = []
-          if (err) {
-            addDocumentsGetter(results)
-            for (const callback of callbacks) {
-              callback(err, results)
-            }
-            return done(retryBody, retryCallbacks)
-          }
-          const { responses } = results.body
-          for (let i = 0, len = responses.length; i < len; i++) {
-            const response = responses[i]
-            if (response.status === 429 && retryCount > 0) {
-              retryBody.push(msearchBody[i * 2])
-              retryBody.push(msearchBody[(i * 2) + 1])
-              retryCallbacks.push(callbacks[i])
-              continue
-            }
-            const result = { ...results, body: response }
-            addDocumentsGetter(result)
-            if (response.status >= 400) {
-              callbacks[i](new ResponseError(result), result)
-            } else {
-              callbacks[i](null, result)
-            }
-          }
-          done(retryBody, retryCallbacks)
-        })
-      }
-    }
-  }
-
-  /**
-   * Creates a bulk helper instance. Once you configure it, you can pick which operation
-   * to execute with the given dataset: index, create, update, or delete.
-   * @param {object} options - The configuration of the bulk operation.
-   * @return {object} The possible operations to run with the datasource.
-   */
-  bulk (options) {
-    const client = this[kClient]
-    const { serialize, deserialize } = client.serializer
-    const {
-      datasource,
-      onDocument,
-      flushBytes = 5000000,
-      flushInterval = 30000,
-      concurrency = 5,
-      retries = this.maxRetries,
-      wait = 5000,
-      onDrop = noop,
-      refreshOnCompletion = false,
-      ...bulkOptions
-    } = options
-
-    if (datasource === undefined) {
-      return Promise.reject(new ConfigurationError('bulk helper: the datasource is required'))
-    }
-    if (!(Array.isArray(datasource) || Buffer.isBuffer(datasource) || typeof datasource.pipe === 'function' || datasource[Symbol.asyncIterator])) {
-      return Promise.reject(new ConfigurationError('bulk helper: the datasource must be an array or a buffer or a readable stream or an async generator'))
-    }
-    if (onDocument === undefined) {
-      return Promise.reject(new ConfigurationError('bulk helper: the onDocument callback is required'))
-    }
-
-    let shouldAbort = false
-    let timeoutId = null
-    const stats = {
-      total: 0,
-      failed: 0,
-      retry: 0,
-      successful: 0,
-      time: 0,
-      bytes: 0,
-      aborted: false
-    }
-
-    const p = iterate()
-    const helper = {
-      then (onFulfilled, onRejected) {
-        return p.then(onFulfilled, onRejected)
-      },
-      catch (onRejected) {
-        return p.catch(onRejected)
-      },
-      abort () {
-        clearTimeout(timeoutId)
-        shouldAbort = true
-        stats.aborted = true
-        return this
-      }
-    }
-
-    return helper
-
-    /**
-     * Function that iterates over the given datasource and starts a bulk operation as soon
-     * as it reaches the configured bulk size. It's designed to use the Node.js asynchronous
-     * model at its maximum capacity, as it will collect the next body to send while there is
-     * a running http call. In this way, the CPU time will be used carefully.
-     * The objects will be serialized right away, to approximate the byte length of the body.
-     * It creates an array of strings instead of an ndjson string because the bulkOperation
-     * will navigate the body to match failed operations with the original documents.
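A usage sketch of the bulk helper documented above, to be run inside an async function; the datasource and index name are made up for illustration:

```js
// Usage sketch: the helper resolves to the stats object shown above.
const stats = await client.helpers.bulk({
  datasource: [{ id: 1, title: 'foo' }, { id: 2, title: 'bar' }],
  onDocument (doc) {
    return { index: { _index: 'my-index' } }
  },
  onDrop (doc) {
    console.log('dropped document:', doc)
  }
})

console.log(stats) // { total, failed, retry, successful, time, bytes, aborted }
```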
-    /**
-     * Function that iterates over the given datasource and starts a bulk operation as soon
-     * as it reaches the configured bulk size. It's designed to use the Node.js asynchronous
-     * model at its maximum capacity, as it will collect the next body to send while there is
-     * a running http call. In this way, the CPU time will be used carefully.
-     * The objects will be serialized right away, to approximate the byte length of the body.
-     * It creates an array of strings instead of an ndjson string because the bulkOperation
-     * will navigate the body for matching failed operations with the original document.
-     */
-    async function iterate () {
-      const { semaphore, finish } = buildSemaphore()
-      const startTime = Date.now()
-      const bulkBody = []
-      let actionBody = ''
-      let payloadBody = ''
-      let chunkBytes = 0
-
-      for await (const chunk of datasource) {
-        if (shouldAbort === true) break
-        clearTimeout(timeoutId)
-        const action = onDocument(chunk)
-        const operation = Array.isArray(action)
-          ? Object.keys(action[0])[0]
-          : Object.keys(action)[0]
-        if (operation === 'index' || operation === 'create') {
-          actionBody = serialize(action)
-          payloadBody = typeof chunk === 'string' ? chunk : serialize(chunk)
-          chunkBytes += Buffer.byteLength(actionBody) + Buffer.byteLength(payloadBody)
-          bulkBody.push(actionBody, payloadBody)
-        } else if (operation === 'update') {
-          actionBody = serialize(action[0])
-          payloadBody = typeof chunk === 'string'
-            ? `{doc:${chunk}}`
-            : serialize({ doc: chunk, ...action[1] })
-          chunkBytes += Buffer.byteLength(actionBody) + Buffer.byteLength(payloadBody)
-          bulkBody.push(actionBody, payloadBody)
-        } else if (operation === 'delete') {
-          actionBody = serialize(action)
-          chunkBytes += Buffer.byteLength(actionBody)
-          bulkBody.push(actionBody)
-        } else {
-          throw new ConfigurationError(`Bulk helper invalid action: '${operation}'`)
-        }
-
-        if (chunkBytes >= flushBytes) {
-          stats.bytes += chunkBytes
-          const send = await semaphore()
-          send(bulkBody.slice())
-          bulkBody.length = 0
-          chunkBytes = 0
-        } else {
-          timeoutId = setTimeout(onFlushTimeout, flushInterval)
-        }
-      }
-
-      clearTimeout(timeoutId)
-      // In some cases the previous http call has not finished yet,
-      // or we didn't reach the flush bytes threshold, so we force one last operation.
-      if (shouldAbort === false && chunkBytes > 0) {
-        const send = await semaphore()
-        stats.bytes += chunkBytes
-        send(bulkBody)
-      }
-
-      await finish()
-
-      if (refreshOnCompletion) {
-        await client.indices.refresh({
-          index: typeof refreshOnCompletion === 'string'
-            ? refreshOnCompletion
-            : '_all'
-        })
-      }
-
-      stats.time = Date.now() - startTime
-      stats.total = stats.successful + stats.failed
-
-      return stats
-
-      async function onFlushTimeout () {
-        stats.bytes += chunkBytes
-        const bulkBodyCopy = bulkBody.slice()
-        bulkBody.length = 0
-        chunkBytes = 0
-        try {
-          const send = await semaphore()
-          send(bulkBodyCopy)
-        } catch (err) {
-          /* istanbul ignore next */
-          helper.abort()
-        }
-      }
-    }
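Putting the pieces above together, a typical invocation looks like the sketch below. This is illustrative only: it assumes a reachable local cluster, client.helpers.bulk mirrors how the 7.x client exposes this class, and the index name is made up.

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

async function run () {
  const stats = await client.helpers.bulk({
    // arrays, buffers, readable streams and async generators are all accepted
    datasource: [{ id: 1, user: 'foo' }, { id: 2, user: 'bar' }],
    // one action per document; index/create/update/delete as described above
    onDocument (doc) {
      return { index: { _index: 'my-index' } }
    },
    // called once per document that failed with a non-retriable status
    onDrop (drop) {
      console.error('dropped:', drop.status, drop.document)
    },
    flushBytes: 1000000, // flush earlier than the 5 MB default
    concurrency: 3,
    retries: 2,
    refreshOnCompletion: true
  })
  console.log(stats) // { total, failed, retry, successful, time, bytes, aborted }
}

run().catch(console.error)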
-    // This function builds a semaphore using the concurrency
-    // options of the bulk helper. It is used inside the iterator
-    // to guarantee that no more than the number of operations
-    // allowed to run at the same time are executed.
-    // It returns a semaphore function which resolves in the next tick
-    // if we didn't reach the maximum concurrency yet, otherwise it returns
-    // a promise that resolves as soon as one of the running requests has finished.
-    // The semaphore function resolves a send function, which will be used
-    // to send the actual bulk request.
-    // It also returns a finish function, which returns a promise that is resolved
-    // when there are no longer requests running. It rejects with an error if one
-    // of the requests has failed for some reason.
-    function buildSemaphore () {
-      let resolveSemaphore = null
-      let resolveFinish = null
-      let rejectFinish = null
-      let error = null
-      let running = 0
-
-      return { semaphore, finish }
-
-      function finish () {
-        return new Promise((resolve, reject) => {
-          if (running === 0) {
-            if (error) {
-              reject(error)
-            } else {
-              resolve()
-            }
-          } else {
-            resolveFinish = resolve
-            rejectFinish = reject
-          }
-        })
-      }
-
-      function semaphore () {
-        if (running < concurrency) {
-          return pImmediate(send)
-        } else {
-          return new Promise((resolve, reject) => {
-            resolveSemaphore = resolve
-          })
-        }
-      }
-
-      function send (bulkBody) {
-        /* istanbul ignore if */
-        if (running >= concurrency) {
-          throw new Error('Max concurrency reached')
-        }
-        running += 1
-        bulkOperation(bulkBody, err => {
-          running -= 1
-          if (err) {
-            shouldAbort = true
-            error = err
-          }
-          if (resolveSemaphore) {
-            resolveSemaphore(send)
-            resolveSemaphore = null
-          } else if (resolveFinish && running === 0) {
-            if (error) {
-              rejectFinish(error)
-            } else {
-              resolveFinish()
-            }
-          }
-        })
-      }
-    }
-
-    function bulkOperation (bulkBody, callback) {
-      let retryCount = retries
-      let isRetrying = false
-
-      // Instead of going all in on async/await, which would make the code easier to read,
-      // we have decided to use callback style instead.
-      // This is because every time we use async await, V8 will create multiple promises
-      // behind the scenes, making the code slightly slower.
-      tryBulk(bulkBody, retryDocuments)
-      function retryDocuments (err, bulkBody) {
-        if (err) return callback(err)
-        if (shouldAbort === true) return callback()
-
-        if (bulkBody.length > 0) {
-          if (retryCount > 0) {
-            isRetrying = true
-            retryCount -= 1
-            stats.retry += bulkBody.length
-            setTimeout(tryBulk, wait, bulkBody, retryDocuments)
-            return
-          }
-          for (let i = 0, len = bulkBody.length; i < len; i = i + 2) {
-            const operation = Object.keys(deserialize(bulkBody[i]))[0]
-            onDrop({
-              status: 429,
-              error: null,
-              operation: deserialize(bulkBody[i]),
-              document: operation !== 'delete'
-                ? deserialize(bulkBody[i + 1])
-                : null,
-              retried: isRetrying
-            })
-            stats.failed += 1
-          }
-        }
-        callback()
-      }
-
-      function tryBulk (bulkBody, callback) {
-        if (shouldAbort === true) return callback(null, [])
-        client.bulk(Object.assign({}, bulkOptions, { body: bulkBody }), (err, { body }) => {
-          if (err) return callback(err, null)
-          if (body.errors === false) {
-            stats.successful += body.items.length
-            return callback(null, [])
-          }
-          const retry = []
-          const { items } = body
-          for (let i = 0, len = items.length; i < len; i++) {
-            const action = items[i]
-            const operation = Object.keys(action)[0]
-            const { status } = action[operation]
-            const indexSlice = operation !== 'delete' ? i * 2 : i
-
-            if (status >= 400) {
-              // 429 is the only status code where we might want to retry
-              // a document, because it was not an error in the document itself,
-              // but the ES node was handling too many operations.
-              if (status === 429) {
-                retry.push(bulkBody[indexSlice])
-                if (operation !== 'delete') {
-                  retry.push(bulkBody[indexSlice + 1])
-                }
-              } else {
-                onDrop({
-                  status: status,
-                  error: action[operation].error,
-                  operation: deserialize(bulkBody[indexSlice]),
-                  document: operation !== 'delete'
-                    ? deserialize(bulkBody[indexSlice + 1])
-                    : null,
-                  retried: isRetrying
-                })
-                stats.failed += 1
-              }
-            } else {
-              stats.successful += 1
-            }
-          }
-          callback(null, retry)
-        })
-      }
-    }
-  }
-}
-
-// Using a getter will improve the overall performance of the code,
-// as we will read the documents only if needed.
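For reference, the addDocumentsGetter function that follows is what enables the lazy documents accessor used by the search helpers; a small sketch of the behavior, assuming a standard search response shape:

// given a search-like response...
const result = { body: { hits: { hits: [{ _source: { user: 'foo' } }] } } }
addDocumentsGetter(result)
// ...the _source of each hit is exposed lazily: the mapping below
// only runs when `documents` is actually accessed
console.log(result.documents) // [ { user: 'foo' } ]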
-function addDocumentsGetter (result) { - Object.defineProperty(result, 'documents', { - get () { - if (this.body.hits && this.body.hits.hits) { - return this.body.hits.hits.map(d => d._source) - } - return [] - } - }) -} - -function appendFilterPath (filter, params, force) { - if (params.filter_path !== undefined) { - params.filter_path += ',' + filter - } else if (params.filterPath !== undefined) { - params.filterPath += ',' + filter - } else if (force === true) { - params.filter_path = filter - } -} - -module.exports = Helpers diff --git a/lib/Serializer.d.ts b/lib/Serializer.d.ts deleted file mode 100644 index a5a8f8d27..000000000 --- a/lib/Serializer.d.ts +++ /dev/null @@ -1,10 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -export default class Serializer { - serialize(object: any): string; - deserialize(json: string): any; - ndserialize(array: any[]): string; - qserialize(object: any): string; -} diff --git a/lib/Serializer.js b/lib/Serializer.js deleted file mode 100644 index aa9d1065b..000000000 --- a/lib/Serializer.js +++ /dev/null @@ -1,68 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const { stringify } = require('querystring') -const debug = require('debug')('elasticsearch') -const sjson = require('secure-json-parse') -const { SerializationError, DeserializationError } = require('./errors') - -class Serializer { - serialize (object) { - debug('Serializing', object) - try { - var json = JSON.stringify(object) - } catch (err) { - throw new SerializationError(err.message, object) - } - return json - } - - deserialize (json) { - debug('Deserializing', json) - try { - var object = sjson.parse(json) - } catch (err) { - throw new DeserializationError(err.message, json) - } - return object - } - - ndserialize (array) { - debug('ndserialize', array) - if (Array.isArray(array) === false) { - throw new SerializationError('The argument provided is not an array') - } - var ndjson = '' - for (var i = 0, len = array.length; i < len; i++) { - if (typeof array[i] === 'string') { - ndjson += array[i] + '\n' - } else { - ndjson += this.serialize(array[i]) + '\n' - } - } - return ndjson - } - - qserialize (object) { - debug('qserialize', object) - if (object == null) return '' - if (typeof object === 'string') return object - // arrays should be serialized as comma separated list - const keys = Object.keys(object) - for (var i = 0, len = keys.length; i < len; i++) { - var key = keys[i] - // elasticsearch will complain for keys without a value - if (object[key] === undefined) { - delete object[key] - } else if (Array.isArray(object[key]) === true) { - object[key] = object[key].join(',') - } - } - return stringify(object) - } -} - -module.exports = Serializer diff --git a/lib/Transport.d.ts b/lib/Transport.d.ts deleted file mode 100644 index 00130aa66..000000000 --- a/lib/Transport.d.ts +++ /dev/null @@ -1,143 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -import { Readable as ReadableStream } from 'stream'; -import { ConnectionPool, CloudConnectionPool } from './pool'; -import Connection from './Connection'; -import Serializer from './Serializer'; -import * as errors from './errors'; - -export type ApiError = errors.ConfigurationError | errors.ConnectionError | - errors.DeserializationError | errors.SerializationError | - errors.NoLivingConnectionsError | errors.ResponseError | - errors.TimeoutError | errors.RequestAbortedError - -export interface nodeSelectorFn { - (connections: Connection[]): Connection; -} - -export interface nodeFilterFn { - (connection: Connection): boolean; -} - -export interface generateRequestIdFn { - (params: TransportRequestParams, options: TransportRequestOptions): any; -} - -interface TransportOptions { - emit: (event: string | symbol, ...args: any[]) => boolean; - connectionPool: ConnectionPool | CloudConnectionPool; - serializer: Serializer; - maxRetries: number; - requestTimeout: number | string; - suggestCompression?: boolean; - compression?: 'gzip'; - sniffInterval?: number; - sniffOnConnectionFault?: boolean; - sniffEndpoint: string; - sniffOnStart?: boolean; - nodeFilter?: nodeFilterFn; - nodeSelector?: string | nodeSelectorFn; - headers?: Record; - generateRequestId?: generateRequestIdFn; - name?: string; - opaqueIdPrefix?: string; -} - -export interface RequestEvent, TContext = unknown> { - body: TResponse; - statusCode: number | null; - headers: Record | null; - warnings: string[] | null; - meta: { - context: TContext; - name: string; - request: { - params: TransportRequestParams; - options: TransportRequestOptions; - id: any; - }; - connection: Connection; - attempts: number; - aborted: boolean; - sniff?: { - hosts: any[]; - reason: string; - }; - }; -} - -// ApiResponse and RequestEvent are the same thing -// we are doing this for have more clear names -export interface ApiResponse, TContext = unknown> extends RequestEvent {} - -export type RequestBody> = T | string | Buffer | ReadableStream -export type RequestNDBody[]> = T | string | string[] | Buffer | ReadableStream - -export interface TransportRequestParams { - method: string; - path: string; - body?: RequestBody; - bulkBody?: RequestNDBody; - querystring?: Record; -} - -export interface TransportRequestOptions { - ignore?: number[]; - requestTimeout?: number | string; - maxRetries?: number; - asStream?: boolean; - headers?: Record; - querystring?: Record; - compression?: 'gzip'; - id?: any; - context?: any; - warnings?: string[]; - opaqueId?: string; -} - -export interface TransportRequestCallback { - abort: () => void; -} - -export interface TransportRequestPromise extends Promise { - abort: () => void; -} - -export interface TransportGetConnectionOptions { - requestId: string; -} - -export interface TransportSniffOptions { - reason: string; - requestId?: string; -} - -export default class Transport { - static sniffReasons: { - SNIFF_ON_START: string; - SNIFF_INTERVAL: string; - SNIFF_ON_CONNECTION_FAULT: string; - DEFAULT: string; - }; - emit: (event: string | symbol, ...args: any[]) => boolean; - connectionPool: ConnectionPool | CloudConnectionPool; - serializer: Serializer; - maxRetries: number; - requestTimeout: number; - suggestCompression: boolean; - compression: 'gzip' | false; - sniffInterval: number; - sniffOnConnectionFault: boolean; - opaqueIdPrefix: string | null; - sniffEndpoint: string; - _sniffEnabled: boolean; - _nextSniff: number; - _isSniffing: boolean; - 
constructor(opts: TransportOptions); - request(params: TransportRequestParams, options?: TransportRequestOptions): Promise; - request(params: TransportRequestParams, options?: TransportRequestOptions, callback?: (err: ApiError, result: ApiResponse) => void): TransportRequestCallback; - getConnection(opts: TransportGetConnectionOptions): Connection | null; - sniff(opts?: TransportSniffOptions, callback?: (...args: any[]) => void): void; -} diff --git a/lib/Transport.js b/lib/Transport.js deleted file mode 100644 index 0edf78347..000000000 --- a/lib/Transport.js +++ /dev/null @@ -1,473 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const debug = require('debug')('elasticsearch') -const os = require('os') -const { gzip, createGzip } = require('zlib') -const ms = require('ms') -const { - ConnectionError, - RequestAbortedError, - NoLivingConnectionsError, - ResponseError, - ConfigurationError -} = require('./errors') - -const noop = () => {} - -const clientVersion = require('../package.json').version -const userAgent = `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${process.version})` - -class Transport { - constructor (opts = {}) { - if (typeof opts.compression === 'string' && opts.compression !== 'gzip') { - throw new ConfigurationError(`Invalid compression: '${opts.compression}'`) - } - this.emit = opts.emit - this.connectionPool = opts.connectionPool - this.serializer = opts.serializer - this.maxRetries = opts.maxRetries - this.requestTimeout = toMs(opts.requestTimeout) - this.suggestCompression = opts.suggestCompression === true - this.compression = opts.compression || false - this.headers = Object.assign({}, - { 'user-agent': userAgent }, - opts.suggestCompression === true ? { 'accept-encoding': 'gzip,deflate' } : null, - lowerCaseHeaders(opts.headers) - ) - this.sniffInterval = opts.sniffInterval - this.sniffOnConnectionFault = opts.sniffOnConnectionFault - this.sniffEndpoint = opts.sniffEndpoint - this.generateRequestId = opts.generateRequestId || generateRequestId() - this.name = opts.name - this.opaqueIdPrefix = opts.opaqueIdPrefix - - this.nodeFilter = opts.nodeFilter || defaultNodeFilter - if (typeof opts.nodeSelector === 'function') { - this.nodeSelector = opts.nodeSelector - } else if (opts.nodeSelector === 'round-robin') { - this.nodeSelector = roundRobinSelector() - } else if (opts.nodeSelector === 'random') { - /* istanbul ignore next */ - this.nodeSelector = randomSelector - } else { - this.nodeSelector = roundRobinSelector() - } - - this._sniffEnabled = typeof this.sniffInterval === 'number' - this._nextSniff = this._sniffEnabled ? (Date.now() + this.sniffInterval) : 0 - this._isSniffing = false - - if (opts.sniffOnStart === true) { - this.sniff({ reason: Transport.sniffReasons.SNIFF_ON_START }) - } - } - - request (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - var p = null - - // promises support - if (callback === undefined) { - let onFulfilled = null - let onRejected = null - p = new Promise((resolve, reject) => { - onFulfilled = resolve - onRejected = reject - }) - callback = function callback (err, result) { - err ? 
onRejected(err) : onFulfilled(result)
-      }
-    }
-
-    const meta = {
-      context: options.context || null,
-      request: {
-        params: null,
-        options: null,
-        id: options.id || this.generateRequestId(params, options)
-      },
-      name: this.name,
-      connection: null,
-      attempts: 0,
-      aborted: false
-    }
-
-    const result = {
-      body: null,
-      statusCode: null,
-      headers: null,
-      warnings: options.warnings || null,
-      meta
-    }
-
-    // We should not retry if we are sending a stream body, because we would have to store
-    // a copy of the stream in memory to be able to send it again, and since we don't know
-    // the size of the stream in advance, we risk taking up too much memory.
-    // Furthermore, copying the stream every time is a very expensive operation.
-    const maxRetries = isStream(params.body) ? 0 : options.maxRetries || this.maxRetries
-    const compression = options.compression !== undefined ? options.compression : this.compression
-    var request = { abort: noop }
-
-    const makeRequest = () => {
-      if (meta.aborted === true) {
-        return callback(new RequestAbortedError(), result)
-      }
-      meta.connection = this.getConnection({ requestId: meta.request.id })
-      if (meta.connection == null) {
-        return callback(new NoLivingConnectionsError(), result)
-      }
-      this.emit('request', null, result)
-      // perform the actual http request
-      request = meta.connection.request(params, onResponse)
-    }
-
-    const onResponse = (err, response) => {
-      if (err !== null) {
-        if (err.name !== 'RequestAbortedError') {
-          // if there is an error in the connection
-          // let's mark the connection as dead
-          this.connectionPool.markDead(meta.connection)
-
-          if (this.sniffOnConnectionFault === true) {
-            this.sniff({
-              reason: Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT,
-              requestId: meta.request.id
-            })
-          }
-
-          // retry logic
-          if (meta.attempts < maxRetries) {
-            meta.attempts++
-            debug(`Retrying request, there are still ${maxRetries - meta.attempts} attempts`, params)
-            makeRequest()
-            return
-          }
-        }
-
-        err.meta = result
-        this.emit('response', err, result)
-        return callback(err, result)
-      }
-
-      const { statusCode, headers } = response
-      result.statusCode = statusCode
-      result.headers = headers
-      if (headers['warning'] !== undefined) {
-        result.warnings = result.warnings || []
-        // split the string over the commas not inside quotes
-        result.warnings.push.apply(result.warnings, headers['warning'].split(/(?!\B"[^"]*),(?![^"]*"\B)/))
-      }
-
-      if (options.asStream === true) {
-        result.body = response
-        this.emit('response', null, result)
-        callback(null, result)
-        return
-      }
-
-      var payload = ''
-      // collect the payload
-      response.setEncoding('utf8')
-      response.on('data', chunk => { payload += chunk })
-      /* istanbul ignore next */
-      response.on('error', err => {
-        const error = new ConnectionError(err.message, result)
-        this.emit('response', error, result)
-        callback(error, result)
-      })
-      response.on('end', () => {
-        const isHead = params.method === 'HEAD'
-        // we should attempt the payload deserialization only if:
-        // - a `content-type` is defined and is equal to `application/json`
-        // - the request is not a HEAD request
-        // - the payload is not an empty string
-        if (headers['content-type'] !== undefined &&
-            headers['content-type'].indexOf('application/json') > -1 &&
-            isHead === false &&
-            payload !== ''
-        ) {
-          try {
-            result.body = this.serializer.deserialize(payload)
-          } catch (err) {
-            this.emit('response', err, result)
-            return callback(err, result)
-          }
-        } else {
-          // cast to boolean if the request method was HEAD
-          result.body = isHead === true ? true : payload
-        }
-
-        // we should ignore the statusCode if the user has configured the `ignore` field with
-        // the statusCode we just got or if the request method is HEAD and the statusCode is 404
-        const ignoreStatusCode = (Array.isArray(options.ignore) && options.ignore.indexOf(statusCode) > -1) ||
-          (isHead === true && statusCode === 404)
-
-        if (ignoreStatusCode === false &&
-            (statusCode === 502 || statusCode === 503 || statusCode === 504)) {
-          // if the statusCode is 502/3/4 we should run our retry strategy
-          // and mark the connection as dead
-          this.connectionPool.markDead(meta.connection)
-          // retry logic (we should not retry on "429 - Too Many Requests")
-          if (meta.attempts < maxRetries && statusCode !== 429) {
-            meta.attempts++
-            debug(`Retrying request, there are still ${maxRetries - meta.attempts} attempts`, params)
-            makeRequest()
-            return
-          }
-        } else {
-          // everything has worked as expected, let's mark
-          // the connection as alive (or confirm it)
-          this.connectionPool.markAlive(meta.connection)
-        }
-
-        if (ignoreStatusCode === false && statusCode >= 400) {
-          const error = new ResponseError(result)
-          this.emit('response', error, result)
-          callback(error, result)
-        } else {
-          // cast to boolean if the request method was HEAD
-          if (isHead === true && statusCode === 404) {
-            result.body = false
-          }
-          this.emit('response', null, result)
-          callback(null, result)
-        }
-      })
-    }
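Condensing the branch above into data: 502, 503 and 504 mark the connection dead and trigger a retry (429 deliberately does not), while a status listed in options.ignore, or a 404 answered to a HEAD request, is never surfaced as an error. An illustrative predicate, not part of the client:

function classifyStatus (statusCode, { method, ignore = [] }) {
  const isHead = method === 'HEAD'
  const ignored = ignore.includes(statusCode) || (isHead && statusCode === 404)
  return {
    // retried against another node; 429 is excluded on purpose
    retriable: !ignored && [502, 503, 504].includes(statusCode),
    // becomes a ResponseError unless ignored
    isError: !ignored && statusCode >= 400
  }
}

classifyStatus(503, { method: 'GET' })  // { retriable: true, isError: true }
classifyStatus(404, { method: 'HEAD' }) // { retriable: false, isError: false }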
-    const headers = Object.assign({}, this.headers, lowerCaseHeaders(options.headers))
-
-    if (options.opaqueId !== undefined) {
-      headers['x-opaque-id'] = this.opaqueIdPrefix !== null
-        ? this.opaqueIdPrefix + options.opaqueId
-        : options.opaqueId
-    }
-
-    // handle json body
-    if (params.body != null) {
-      if (shouldSerialize(params.body) === true) {
-        try {
-          params.body = this.serializer.serialize(params.body)
-        } catch (err) {
-          return callback(err, result)
-        }
-      }
-
-      if (params.body !== '') {
-        headers['content-type'] = headers['content-type'] || 'application/json'
-      }
-
-    // handle ndjson body
-    } else if (params.bulkBody != null) {
-      if (shouldSerialize(params.bulkBody) === true) {
-        try {
-          params.body = this.serializer.ndserialize(params.bulkBody)
-        } catch (err) {
-          return callback(err, result)
-        }
-      } else {
-        params.body = params.bulkBody
-      }
-      if (params.body !== '') {
-        headers['content-type'] = headers['content-type'] || 'application/x-ndjson'
-      }
-    }
-
-    params.headers = headers
-    // serializes the querystring
-    if (options.querystring == null) {
-      params.querystring = this.serializer.qserialize(params.querystring)
-    } else {
-      params.querystring = this.serializer.qserialize(
-        Object.assign({}, params.querystring, options.querystring)
-      )
-    }
-
-    // handles request timeout
-    params.timeout = toMs(options.requestTimeout || this.requestTimeout)
-    if (options.asStream === true) params.asStream = true
-    meta.request.params = params
-    meta.request.options = options
-
-    // handle compression
-    if (params.body !== '' && params.body != null) {
-      if (isStream(params.body) === true) {
-        if (compression === 'gzip') {
-          params.headers['content-encoding'] = compression
-          params.body = params.body.pipe(createGzip())
-        }
-        makeRequest()
-      } else if (compression === 'gzip') {
-        gzip(params.body, (err, buffer) => {
-          /* istanbul ignore next */
-          if (err) {
-            return callback(err, result)
-          }
-          params.headers['content-encoding'] = compression
-          params.headers['content-length'] = '' + Buffer.byteLength(buffer)
-          params.body = buffer
-          makeRequest()
-        })
-      } else {
-        params.headers['content-length'] = '' +
Buffer.byteLength(params.body) - makeRequest() - } - } else { - makeRequest() - } - - return { - then (onFulfilled, onRejected) { - return p.then(onFulfilled, onRejected) - }, - catch (onRejected) { - return p.catch(onRejected) - }, - abort () { - meta.aborted = true - request.abort() - debug('Aborting request', params) - return this - } - } - } - - getConnection (opts) { - const now = Date.now() - if (this._sniffEnabled === true && now > this._nextSniff) { - this.sniff({ reason: Transport.sniffReasons.SNIFF_INTERVAL, requestId: opts.requestId }) - } - return this.connectionPool.getConnection({ - filter: this.nodeFilter, - selector: this.nodeSelector, - requestId: opts.requestId, - name: this.name, - now - }) - } - - sniff (opts, callback = noop) { - if (this._isSniffing === true) return - this._isSniffing = true - debug('Started sniffing request') - - if (typeof opts === 'function') { - callback = opts - opts = { reason: Transport.sniffReasons.DEFAULT } - } - - const { reason } = opts - - const request = { - method: 'GET', - path: this.sniffEndpoint - } - - this.request(request, { id: opts.requestId }, (err, result) => { - this._isSniffing = false - if (this._sniffEnabled === true) { - this._nextSniff = Date.now() + this.sniffInterval - } - - if (err != null) { - debug('Sniffing errored', err) - result.meta.sniff = { hosts: [], reason } - this.emit('sniff', err, result) - return callback(err) - } - - debug('Sniffing ended successfully', result.body) - const protocol = result.meta.connection.url.protocol || 'http:' - const hosts = this.connectionPool.nodesToHost(result.body.nodes, protocol) - this.connectionPool.update(hosts) - - result.meta.sniff = { hosts, reason } - this.emit('sniff', null, result) - callback(null, hosts) - }) - } -} - -Transport.sniffReasons = { - SNIFF_ON_START: 'sniff-on-start', - SNIFF_INTERVAL: 'sniff-interval', - SNIFF_ON_CONNECTION_FAULT: 'sniff-on-connection-fault', - // TODO: find a better name - DEFAULT: 'default' -} - -function toMs (time) { - if (typeof time === 'string') { - return ms(time) - } - return time -} - -function shouldSerialize (obj) { - return typeof obj !== 'string' && - typeof obj.pipe !== 'function' && - Buffer.isBuffer(obj) === false -} - -function isStream (obj) { - return obj != null && typeof obj.pipe === 'function' -} - -function defaultNodeFilter (node) { - // avoid master only nodes - if (node.roles.master === true && - node.roles.data === false && - node.roles.ingest === false) { - return false - } - return true -} - -function roundRobinSelector () { - var current = -1 - return function _roundRobinSelector (connections) { - if (++current >= connections.length) { - current = 0 - } - return connections[current] - } -} - -function randomSelector (connections) { - const index = Math.floor(Math.random() * connections.length) - return connections[index] -} - -function generateRequestId () { - var maxInt = 2147483647 - var nextReqId = 0 - return function genReqId (params, options) { - return (nextReqId = (nextReqId + 1) & maxInt) - } -} - -function lowerCaseHeaders (oldHeaders) { - if (oldHeaders == null) return oldHeaders - const newHeaders = {} - for (const header in oldHeaders) { - newHeaders[header.toLowerCase()] = oldHeaders[header] - } - return newHeaders -} - -module.exports = Transport -module.exports.internals = { - defaultNodeFilter, - roundRobinSelector, - randomSelector, - generateRequestId, - lowerCaseHeaders -} diff --git a/lib/errors.d.ts b/lib/errors.d.ts deleted file mode 100644 index 1540ab25a..000000000 --- 
a/lib/errors.d.ts +++ /dev/null @@ -1,68 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -import { ApiResponse } from './Transport' - -export declare class ElasticsearchClientError extends Error { - name: string; - message: string; -} - -export declare class TimeoutError, TContext = unknown> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - constructor(message: string, meta: ApiResponse); -} - -export declare class ConnectionError, TContext = unknown> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - constructor(message: string, meta: ApiResponse); -} - -export declare class NoLivingConnectionsError, TContext = unknown> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - constructor(message: string, meta: ApiResponse); -} - -export declare class SerializationError extends ElasticsearchClientError { - name: string; - message: string; - data: any; - constructor(message: string, data: any); -} - -export declare class DeserializationError extends ElasticsearchClientError { - name: string; - message: string; - data: string; - constructor(message: string, data: string); -} - -export declare class ConfigurationError extends ElasticsearchClientError { - name: string; - message: string; - constructor(message: string); -} - -export declare class ResponseError, TContext = unknown> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - body: TResponse; - statusCode: number; - headers: Record; - constructor(meta: ApiResponse); -} - -export declare class RequestAbortedError, TContext = unknown> extends ElasticsearchClientError { - name: string; - message: string; - meta: ApiResponse; - constructor(message: string, meta: ApiResponse); -} \ No newline at end of file diff --git a/lib/errors.js b/lib/errors.js deleted file mode 100644 index b96aa811e..000000000 --- a/lib/errors.js +++ /dev/null @@ -1,118 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -class ElasticsearchClientError extends Error { - constructor (message) { - super(message) - this.name = 'ElasticsearchClientError' - } -} - -class TimeoutError extends ElasticsearchClientError { - constructor (message, meta) { - super(message) - Error.captureStackTrace(this, TimeoutError) - this.name = 'TimeoutError' - this.message = message || 'Timeout Error' - this.meta = meta - } -} - -class ConnectionError extends ElasticsearchClientError { - constructor (message, meta) { - super(message) - Error.captureStackTrace(this, ConnectionError) - this.name = 'ConnectionError' - this.message = message || 'Connection Error' - this.meta = meta - } -} - -class NoLivingConnectionsError extends ElasticsearchClientError { - constructor (message, meta) { - super(message) - Error.captureStackTrace(this, NoLivingConnectionsError) - this.name = 'NoLivingConnectionsError' - this.message = message || 'Given the configuration, the ConnectionPool was not able to find a usable Connection for this request.' 
- this.meta = meta - } -} - -class SerializationError extends ElasticsearchClientError { - constructor (message, data) { - super(message, data) - Error.captureStackTrace(this, SerializationError) - this.name = 'SerializationError' - this.message = message || 'Serialization Error' - this.data = data - } -} - -class DeserializationError extends ElasticsearchClientError { - constructor (message, data) { - super(message, data) - Error.captureStackTrace(this, DeserializationError) - this.name = 'DeserializationError' - this.message = message || 'Deserialization Error' - this.data = data - } -} - -class ConfigurationError extends ElasticsearchClientError { - constructor (message) { - super(message) - Error.captureStackTrace(this, ConfigurationError) - this.name = 'ConfigurationError' - this.message = message || 'Configuration Error' - } -} - -class ResponseError extends ElasticsearchClientError { - constructor (meta) { - super('Response Error') - Error.captureStackTrace(this, ResponseError) - this.name = 'ResponseError' - this.message = (meta.body && meta.body.error && meta.body.error.type) || 'Response Error' - this.meta = meta - } - - get body () { - return this.meta.body - } - - get statusCode () { - if (this.meta.body && typeof this.meta.body.status === 'number') { - return this.meta.body.status - } - return this.meta.statusCode - } - - get headers () { - return this.meta.headers - } -} - -class RequestAbortedError extends ElasticsearchClientError { - constructor (message, meta) { - super(message) - Error.captureStackTrace(this, RequestAbortedError) - this.name = 'RequestAbortedError' - this.message = message || 'Request aborted' - this.meta = meta - } -} - -module.exports = { - ElasticsearchClientError, - TimeoutError, - ConnectionError, - NoLivingConnectionsError, - SerializationError, - DeserializationError, - ConfigurationError, - ResponseError, - RequestAbortedError -} diff --git a/lib/pool/BaseConnectionPool.js b/lib/pool/BaseConnectionPool.js deleted file mode 100644 index c08d5be06..000000000 --- a/lib/pool/BaseConnectionPool.js +++ /dev/null @@ -1,239 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const { URL } = require('url') -const debug = require('debug')('elasticsearch') -const Connection = require('../Connection') -const noop = () => {} - -class BaseConnectionPool { - constructor (opts) { - // list of nodes and weights - this.connections = [] - // how many nodes we have in our scheduler - this.size = this.connections.length - this.Connection = opts.Connection - this.emit = opts.emit || noop - this.auth = opts.auth || null - this._ssl = opts.ssl - this._agent = opts.agent - } - - getConnection () { - throw new Error('getConnection must be implemented') - } - - markAlive () { - return this - } - - markDead () { - return this - } - - /** - * Creates a new connection instance. 
-   */
-  createConnection (opts) {
-    if (typeof opts === 'string') {
-      opts = this.urlToHost(opts)
-    }
-
-    if (this.auth !== null) {
-      opts.auth = this.auth
-    } else if (opts.url.username !== '' && opts.url.password !== '') {
-      opts.auth = {
-        username: decodeURIComponent(opts.url.username),
-        password: decodeURIComponent(opts.url.password)
-      }
-    }
-
-    if (opts.ssl == null) opts.ssl = this._ssl
-    if (opts.agent == null) opts.agent = this._agent
-
-    const connection = new this.Connection(opts)
-
-    for (const conn of this.connections) {
-      if (conn.id === connection.id) {
-        throw new Error(`Connection with id '${connection.id}' is already present`)
-      }
-    }
-
-    return connection
-  }
-
-  /**
-   * Adds a new connection to the pool.
-   *
-   * @param {object|string} host
-   * @returns {ConnectionPool}
-   */
-  addConnection (opts) {
-    if (Array.isArray(opts)) {
-      return opts.forEach(o => this.addConnection(o))
-    }
-
-    if (typeof opts === 'string') {
-      opts = this.urlToHost(opts)
-    }
-
-    const connectionById = this.connections.find(c => c.id === opts.id)
-    const connectionByUrl = this.connections.find(c => c.id === opts.url.href)
-
-    if (connectionById || connectionByUrl) {
-      throw new Error(`Connection with id '${opts.id || opts.url.href}' is already present`)
-    }
-
-    this.update([...this.connections, opts])
-    return this.connections[this.size - 1]
-  }
-
-  /**
-   * Removes a connection from the pool.
-   *
-   * @param {object} connection
-   * @returns {ConnectionPool}
-   */
-  removeConnection (connection) {
-    debug('Removing connection', connection)
-    return this.update(this.connections.filter(c => c.id !== connection.id))
-  }
-
-  /**
-   * Empties the connection pool.
-   *
-   * @returns {ConnectionPool}
-   */
-  empty (callback) {
-    debug('Emptying the connection pool')
-    var openConnections = this.size
-    this.connections.forEach(connection => {
-      connection.close(() => {
-        if (--openConnections === 0) {
-          this.connections = []
-          this.size = this.connections.length
-          callback()
-        }
-      })
-    })
-  }
-
-  /**
-   * Update the ConnectionPool with new connections.
-   *
-   * @param {array} array of connections
-   * @returns {ConnectionPool}
-   */
-  update (nodes) {
-    debug('Updating the connection pool')
-    const newConnections = []
-    const oldConnections = []
-
-    for (const node of nodes) {
-      // if we already have a given connection in the pool
-      // we mark it as alive and we do not close the connection
-      // to avoid socket issues
-      const connectionById = this.connections.find(c => c.id === node.id)
-      const connectionByUrl = this.connections.find(c => c.id === node.url.href)
-      if (connectionById) {
-        debug(`The connection with id '${node.id}' is already present`)
-        this.markAlive(connectionById)
-        newConnections.push(connectionById)
-      // in case the user has passed a single url (or an array of urls),
-      // the connection id will be the full href; to avoid closing valid connections
-      // because they are not present in the pool, we also check the node url,
-      // and if it is already present we update its id with the one provided by Elasticsearch.
- } else if (connectionByUrl) { - connectionByUrl.id = node.id - this.markAlive(connectionByUrl) - newConnections.push(connectionByUrl) - } else { - newConnections.push(this.createConnection(node)) - } - } - - const ids = nodes.map(c => c.id) - // remove all the dead connections and old connections - for (const connection of this.connections) { - if (ids.indexOf(connection.id) === -1) { - oldConnections.push(connection) - } - } - - // close old connections - oldConnections.forEach(connection => connection.close()) - - this.connections = newConnections - this.size = this.connections.length - - return this - } - - /** - * Transforms the nodes objects to a host object. - * - * @param {object} nodes - * @returns {array} hosts - */ - nodesToHost (nodes, protocol) { - const ids = Object.keys(nodes) - const hosts = [] - - for (var i = 0, len = ids.length; i < len; i++) { - const node = nodes[ids[i]] - // If there is no protocol in - // the `publish_address` new URL will throw - // the publish_address can have two forms: - // - ip:port - // - hostname/ip:port - // if we encounter the second case, we should - // use the hostname instead of the ip - var address = node.http.publish_address - const parts = address.split('/') - // the url is in the form of hostname/ip:port - if (parts.length > 1) { - const hostname = parts[0] - const port = parts[1].match(/((?::))(?:[0-9]+)$/g)[0].slice(1) - address = `${hostname}:${port}` - } - - address = address.slice(0, 4) === 'http' - ? address - : `${protocol}//${address}` - const roles = node.roles.reduce((acc, role) => { - acc[role] = true - return acc - }, {}) - - hosts.push({ - url: new URL(address), - id: ids[i], - roles: Object.assign({ - [Connection.roles.MASTER]: false, - [Connection.roles.DATA]: false, - [Connection.roles.INGEST]: false, - [Connection.roles.ML]: false - }, roles) - }) - } - - return hosts - } - - /** - * Transforms an url string to a host object - * - * @param {string} url - * @returns {object} host - */ - urlToHost (url) { - return { - url: new URL(url) - } - } -} - -module.exports = BaseConnectionPool diff --git a/lib/pool/CloudConnectionPool.js b/lib/pool/CloudConnectionPool.js deleted file mode 100644 index 0ff5a4da2..000000000 --- a/lib/pool/CloudConnectionPool.js +++ /dev/null @@ -1,49 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const BaseConnectionPool = require('./BaseConnectionPool') - -class CloudConnectionPool extends BaseConnectionPool { - constructor (opts = {}) { - super(opts) - this.cloudConnection = null - } - - /** - * Returns the only cloud connection. - * - * @returns {object} connection - */ - getConnection () { - return this.cloudConnection - } - - /** - * Empties the connection pool. - * - * @returns {ConnectionPool} - */ - empty (callback) { - super.empty(() => { - this.cloudConnection = null - callback() - }) - } - - /** - * Update the ConnectionPool with new connections. 
-   *
-   * @param {array} array of connections
-   * @returns {ConnectionPool}
-   */
-  update (connections) {
-    super.update(connections)
-    this.cloudConnection = this.connections[0]
-    return this
-  }
-}
-
-module.exports = CloudConnectionPool
diff --git a/lib/pool/ConnectionPool.js b/lib/pool/ConnectionPool.js
deleted file mode 100644
index 143fd75c7..000000000
--- a/lib/pool/ConnectionPool.js
+++ /dev/null
@@ -1,232 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
-
-'use strict'
-
-const BaseConnectionPool = require('./BaseConnectionPool')
-const assert = require('assert')
-const debug = require('debug')('elasticsearch')
-const Connection = require('../Connection')
-const noop = () => {}
-
-class ConnectionPool extends BaseConnectionPool {
-  constructor (opts = {}) {
-    super(opts)
-
-    this.dead = []
-    // the resurrect timeout is 60s
-    this.resurrectTimeout = 1000 * 60
-    // number of consecutive failures after which
-    // the timeout doesn't increase
-    this.resurrectTimeoutCutoff = 5
-    this.pingTimeout = opts.pingTimeout
-    this._sniffEnabled = opts.sniffEnabled || false
-
-    const resurrectStrategy = opts.resurrectStrategy || 'ping'
-    this.resurrectStrategy = ConnectionPool.resurrectStrategies[resurrectStrategy]
-    assert(
-      this.resurrectStrategy != null,
-      `Invalid resurrection strategy: '${resurrectStrategy}'`
-    )
-  }
-
-  /**
-   * Marks a connection as 'alive'.
-   * If needed removes the connection from the dead list
-   * and then resets the `deadCount`.
-   *
-   * @param {object} connection
-   */
-  markAlive (connection) {
-    const { id } = connection
-    debug(`Marking as 'alive' connection '${id}'`)
-    const index = this.dead.indexOf(id)
-    if (index > -1) this.dead.splice(index, 1)
-    connection.status = Connection.statuses.ALIVE
-    connection.deadCount = 0
-    connection.resurrectTimeout = 0
-    return this
-  }
-
-  /**
-   * Marks a connection as 'dead'.
-   * If needed adds the connection to the dead list
-   * and then increments the `deadCount`.
-   *
-   * @param {object} connection
-   */
-  markDead (connection) {
-    const { id } = connection
-    debug(`Marking as 'dead' connection '${id}'`)
-    if (this.dead.indexOf(id) === -1) {
-      // It might happen that `markDead` is called just after
-      // a pool update, and in such a case we would add to the dead
-      // list a node that no longer exists. The following check verifies
-      // that the connection is still part of the pool before
-      // marking it as dead.
-      for (var i = 0; i < this.size; i++) {
-        if (this.connections[i].id === id) {
-          this.dead.push(id)
-          break
-        }
-      }
-    }
-    connection.status = Connection.statuses.DEAD
-    connection.deadCount++
-    // resurrectTimeout formula:
-    // `resurrectTimeout * 2 ** min(deadCount - 1, resurrectTimeoutCutoff)`
-    connection.resurrectTimeout = Date.now() + this.resurrectTimeout * Math.pow(
-      2, Math.min(connection.deadCount - 1, this.resurrectTimeoutCutoff)
-    )
-
-    // sort the dead list in ascending order
-    // based on the resurrectTimeout
-    this.dead.sort((a, b) => {
-      const conn1 = this.connections.find(c => c.id === a)
-      const conn2 = this.connections.find(c => c.id === b)
-      return conn1.resurrectTimeout - conn2.resurrectTimeout
-    })
-
-    return this
-  }
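The formula in markDead above yields an exponential backoff that stops growing after resurrectTimeoutCutoff consecutive failures. A quick sketch of the resulting schedule with the defaults (60 second base, cutoff 5):

const base = 60 * 1000 // default resurrectTimeout
const cutoff = 5       // default resurrectTimeoutCutoff

for (let deadCount = 1; deadCount <= 7; deadCount++) {
  const waitMs = base * 2 ** Math.min(deadCount - 1, cutoff)
  console.log(`deadCount=${deadCount} -> retry in ${waitMs / 60000} min`)
}
// prints 1, 2, 4, 8, 16, 32, 32 minutes: growth is capped at the cutoff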
-  /**
-   * If enabled, tries to resurrect a connection with the given
-   * resurrect strategy ('ping', 'optimistic', 'none').
-   *
-   * @param {object} { now, requestId }
-   * @param {function} callback (isAlive, connection)
-   */
-  resurrect (opts, callback = noop) {
-    if (this.resurrectStrategy === 0 || this.dead.length === 0) {
-      debug('Nothing to resurrect')
-      callback(null, null)
-      return
-    }
-
-    // the dead list is sorted in ascending order based on the timeout
-    // so the first element will always be the one with the smallest timeout
-    const connection = this.connections.find(c => c.id === this.dead[0])
-    if ((opts.now || Date.now()) < connection.resurrectTimeout) {
-      debug('Nothing to resurrect')
-      callback(null, null)
-      return
-    }
-
-    const { id } = connection
-
-    // ping strategy
-    if (this.resurrectStrategy === 1) {
-      connection.request({
-        method: 'HEAD',
-        path: '/',
-        timeout: this.pingTimeout
-      }, (err, response) => {
-        var isAlive = true
-        const statusCode = response !== null ? response.statusCode : 0
-        if (err != null ||
-            (statusCode === 502 || statusCode === 503 || statusCode === 504)) {
-          debug(`Resurrect: connection '${id}' is still dead`)
-          this.markDead(connection)
-          isAlive = false
-        } else {
-          debug(`Resurrect: connection '${id}' is now alive`)
-          this.markAlive(connection)
-        }
-        this.emit('resurrect', null, {
-          strategy: 'ping',
-          name: opts.name,
-          request: { id: opts.requestId },
-          isAlive,
-          connection
-        })
-        callback(isAlive, connection)
-      })
-    // optimistic strategy
-    } else {
-      debug(`Resurrect: optimistic resurrection for connection '${id}'`)
-      this.dead.splice(this.dead.indexOf(id), 1)
-      connection.status = Connection.statuses.ALIVE
-      this.emit('resurrect', null, {
-        strategy: 'optimistic',
-        name: opts.name,
-        request: { id: opts.requestId },
-        isAlive: true,
-        connection
-      })
-      // eslint-disable-next-line standard/no-callback-literal
-      callback(true, connection)
-    }
-  }
-
-  /**
-   * Returns an alive connection if present,
-   * otherwise returns a dead connection.
-   * By default it filters out the master-only nodes.
-   * It uses the selector to choose which
-   * connection to return.
-   *
-   * @param {object} options (filter and selector)
-   * @returns {object|null} connection
-   */
-  getConnection (opts = {}) {
-    const filter = opts.filter || (() => true)
-    const selector = opts.selector || (c => c[0])
-
-    this.resurrect({
-      now: opts.now,
-      requestId: opts.requestId,
-      name: opts.name
-    })
-
-    const noAliveConnections = this.size === this.dead.length
-
-    // TODO: can we cache this?
-    const connections = []
-    for (var i = 0; i < this.size; i++) {
-      const connection = this.connections[i]
-      if (noAliveConnections || connection.status === Connection.statuses.ALIVE) {
-        if (filter(connection) === true) {
-          connections.push(connection)
-        }
-      }
-    }
-
-    if (connections.length === 0) return null
-
-    return selector(connections)
-  }
-
-  /**
-   * Empties the connection pool.
-   *
-   * @returns {ConnectionPool}
-   */
-  empty (callback) {
-    super.empty(() => {
-      this.dead = []
-      callback()
-    })
-  }
-
-  /**
-   * Update the ConnectionPool with new connections.
-   *
-   * @param {array} array of connections
-   * @returns {ConnectionPool}
-   */
-  update (connections) {
-    super.update(connections)
-    this.dead = []
-    return this
-  }
-}
-
-ConnectionPool.resurrectStrategies = {
-  none: 0,
-  ping: 1,
-  optimistic: 2
-}
-
-module.exports = ConnectionPool
diff --git a/lib/pool/index.d.ts b/lib/pool/index.d.ts
deleted file mode 100644
index bbad18710..000000000
--- a/lib/pool/index.d.ts
+++ /dev/null
@@ -1,197 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -/// - -import { URL } from 'url' -import { SecureContextOptions } from 'tls'; -import Connection, { AgentOptions } from '../Connection'; -import { nodeFilterFn, nodeSelectorFn } from '../Transport'; - -interface BaseConnectionPoolOptions { - ssl?: SecureContextOptions; - agent?: AgentOptions; - auth?: BasicAuth | ApiKeyAuth; - emit: (event: string | symbol, ...args: any[]) => boolean; - Connection: typeof Connection; -} - -interface ConnectionPoolOptions extends BaseConnectionPoolOptions { - pingTimeout?: number; - resurrectStrategy?: 'ping' | 'optimistic' | 'none'; - sniffEnabled?: boolean; -} - -interface getConnectionOptions { - filter?: nodeFilterFn; - selector?: nodeSelectorFn; - requestId?: string | number; - name?: string; - now?: number; -} - -interface ApiKeyAuth { - apiKey: - | string - | { - id: string; - api_key: string; - } -} - -interface BasicAuth { - username: string; - password: string; -} - -interface resurrectOptions { - now?: number; - requestId: string; - name: string; -} - -interface ResurrectEvent { - strategy: string; - isAlive: boolean; - connection: Connection; - name: string; - request: { - id: any; - }; -} - - -declare class BaseConnectionPool { - connections: Connection[]; - size: number; - emit: (event: string | symbol, ...args: any[]) => boolean; - _ssl: SecureContextOptions | null; - _agent: AgentOptions | null; - auth: BasicAuth | ApiKeyAuth; - Connection: typeof Connection; - constructor(opts?: BaseConnectionPoolOptions); - /** - * Marks a connection as 'alive'. - * If needed removes the connection from the dead list - * and then resets the `deadCount`. - * - * @param {object} connection - */ - markAlive(connection: Connection): this; - /** - * Marks a connection as 'dead'. - * If needed adds the connection to the dead list - * and then increments the `deadCount`. - * - * @param {object} connection - */ - markDead(connection: Connection): this; - /** - * Returns an alive connection if present, - * otherwise returns a dead connection. - * By default it filters the `master` only nodes. - * It uses the selector to choose which - * connection return. - * - * @param {object} options (filter and selector) - * @returns {object|null} connection - */ - getConnection(opts?: getConnectionOptions): Connection | null; - /** - * Adds a new connection to the pool. - * - * @param {object|string} host - * @returns {ConnectionPool} - */ - addConnection(opts: any): Connection; - /** - * Removes a new connection to the pool. - * - * @param {object} connection - * @returns {ConnectionPool} - */ - removeConnection(connection: Connection): this; - /** - * Empties the connection pool. - * - * @returns {ConnectionPool} - */ - empty(): this; - /** - * Update the ConnectionPool with new connections. - * - * @param {array} array of connections - * @returns {ConnectionPool} - */ - update(connections: any[]): this; - /** - * Transforms the nodes objects to a host object. 
- * - * @param {object} nodes - * @returns {array} hosts - */ - nodesToHost(nodes: any, protocol: string): any[]; - /** - * Transforms an url string to a host object - * - * @param {string} url - * @returns {object} host - */ - urlToHost(url: string): { url: URL }; -} - -declare class ConnectionPool extends BaseConnectionPool { - static resurrectStrategies: { - none: number; - ping: number; - optimistic: number; - }; - dead: string[]; - _sniffEnabled: boolean; - resurrectTimeout: number; - resurrectTimeoutCutoff: number; - pingTimeout: number; - resurrectStrategy: number; - constructor(opts?: ConnectionPoolOptions); - - /** - * If enabled, tries to resurrect a connection with the given - * resurrect strategy ('ping', 'optimistic', 'none'). - * - * @param {object} { now, requestId, name } - * @param {function} callback (isAlive, connection) - */ - resurrect(opts: resurrectOptions, callback?: (isAlive: boolean | null, connection: Connection | null) => void): void; -} - -declare class CloudConnectionPool extends BaseConnectionPool { - cloudConnection: Connection | null - constructor(opts?: BaseConnectionPoolOptions); - getConnection(): Connection | null; -} - -declare function defaultNodeFilter(node: Connection): boolean; -declare function roundRobinSelector(): (connections: Connection[]) => Connection; -declare function randomSelector(connections: Connection[]): Connection; - -declare const internals: { - defaultNodeFilter: typeof defaultNodeFilter; - roundRobinSelector: typeof roundRobinSelector; - randomSelector: typeof randomSelector; -}; - -export { - // Interfaces - ConnectionPoolOptions, - getConnectionOptions, - ApiKeyAuth, - BasicAuth, - internals, - resurrectOptions, - ResurrectEvent, - // Classes - BaseConnectionPool, - ConnectionPool, - CloudConnectionPool -}; diff --git a/lib/pool/index.js b/lib/pool/index.js deleted file mode 100644 index 1cdf1ed4c..000000000 --- a/lib/pool/index.js +++ /dev/null @@ -1,15 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -const BaseConnectionPool = require('./BaseConnectionPool') -const ConnectionPool = require('./ConnectionPool') -const CloudConnectionPool = require('./CloudConnectionPool') - -module.exports = { - BaseConnectionPool, - ConnectionPool, - CloudConnectionPool -} diff --git a/package.json b/package.json index c4db60189..ec21e7362 100644 --- a/package.json +++ b/package.json @@ -1,10 +1,34 @@ { "name": "@elastic/elasticsearch", + "version": "9.2.0", + "versionCanary": "9.2.0-canary.0", "description": "The official Elasticsearch client for Node.js", - "main": "index.js", + "main": "./index.js", "types": "index.d.ts", - "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", - "version": "8.0.0-SNAPSHOT.9f33e3c7", + "exports": { + "require": "./index.js", + "import": "./index.js", + "types": "./index.d.ts" + }, + "scripts": { + "test": "npm run build && npm run lint && tap", + "test:unit": "npm run build && tap", + "test:unit-bun": "bun run build && bunx tap", + "test:esm": "npm run build && cd test/esm/ && npm install && node test-import.mjs", + "test:coverage-100": "npm run build && tap --coverage --100", + "test:coverage-report": "npm run build && tap --coverage && nyc report --reporter=text-lcov > coverage.lcov", + "test:coverage-ui": "npm run build && tap --coverage --coverage-report=html", + "test:integration-build": "npm run build && node test/integration/index.js", + "test:integration": "npm run test:integration-build && env tap run --jobs=1 --reporter=junit --reporter-file=report-junit.xml generated-tests/", + "lint": "ts-standard src", + "lint:fix": "ts-standard --fix src", + "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause;0BSD'", + "license-header": "./scripts/check-spdx", + "prebuild": "npm run clean-build && npm run lint", + "build": "tsc && rm lib/package.json && mv lib/src/* lib/ && rm -rf lib/src", + "clean-build": "rimraf ./lib && mkdir lib", + "prepublishOnly": "npm run build" + }, "keywords": [ "elasticsearch", "elastic", @@ -15,65 +39,12 @@ "client", "index" ], - "scripts": { - "test": "npm run lint && npm run test:unit && npm run test:behavior && npm run test:types", - "test:node8": "npm run lint && tap test/unit/*.test.js -t 300 --no-coverage && npm run test:behavior && npm run test:types", - "test:unit": "tap test/unit/*.test.js test/unit/**/*.test.js -t 300 --no-coverage", - "test:behavior": "tap test/behavior/*.test.js -t 300 --no-coverage", - "test:integration": "node test/integration/index.js", - "test:integration:helpers": "tap test/integration/helpers/*.test.js --no-coverage -J", - "test:types": "tsd", - "test:coverage": "tap test/unit/*.test.js test/unit/**/*.test.js test/behavior/*.test.js -t 300 && nyc report --reporter=text-lcov > coverage.lcov", - "test:coverage-ui": "tap test/unit/*.test.js test/unit/**/*.test.js test/behavior/*.test.js -t 300 --coverage-report=html", - "lint": "standard", - "lint:fix": "standard --fix", - "ci": "npm run license-checker && npm test && npm run test:integration:helpers && npm run test:integration && npm run test:coverage", - "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause'", - "elasticsearch": "./scripts/es-docker.sh", - "elasticsearch:xpack": "./scripts/es-docker-platinum.sh" - }, - "author": { - "name": "Tomas Della Vedova", - "company": "Elastic BV" - }, - 
"original-author": { - "name": "Spencer Alger", - "company": "Elasticsearch BV" - }, - "devDependencies": { - "@sinonjs/fake-timers": "^6.0.1", - "@types/node": "^12.6.2", - "convert-hrtime": "^3.0.0", - "dedent": "^0.7.0", - "deepmerge": "^4.0.0", - "dezalgo": "^1.0.3", - "fast-deep-equal": "^3.1.1", - "into-stream": "^5.1.1", - "js-yaml": "^3.13.1", - "license-checker": "^25.0.1", - "lolex": "^4.0.1", - "minimist": "^1.2.0", - "ora": "^3.4.0", - "pretty-hrtime": "^1.0.3", - "rimraf": "^2.6.3", - "semver": "^6.0.0", - "simple-git": "^1.110.0", - "simple-statistics": "^7.0.2", - "split2": "^3.1.1", - "standard": "^13.0.2", - "stoppable": "^1.1.0", - "tap": "^14.4.1", - "tsd": "^0.11.0", - "workq": "^2.1.0", - "xmlbuilder2": "^2.1.2" - }, - "dependencies": { - "debug": "^4.1.1", - "decompress-response": "^4.2.0", - "ms": "^2.1.1", - "pump": "^3.0.0", - "secure-json-parse": "^2.1.0" - }, + "contributors": [ + { + "name": "Elastic Client Library Maintainers", + "company": "Elastic BV" + } + ], "license": "Apache-2.0", "repository": { "type": "git", @@ -82,10 +53,53 @@ "bugs": { "url": "https://github.com/elastic/elasticsearch-js/issues" }, + "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html", "engines": { - "node": ">=8" + "node": ">=18" + }, + "devDependencies": { + "@elastic/request-converter": "9.1.2", + "@opentelemetry/sdk-trace-base": "1.30.1", + "@sinonjs/fake-timers": "14.0.0", + "@types/debug": "4.1.12", + "@types/ms": "2.1.0", + "@types/node": "22.17.1", + "@types/sinonjs__fake-timers": "8.1.5", + "@types/split2": "4.2.3", + "@types/stoppable": "1.1.3", + "chai": "5.2.1", + "cross-zip": "4.0.1", + "desm": "1.3.1", + "into-stream": "8.0.1", + "js-yaml": "4.1.0", + "license-checker": "25.0.1", + "minimist": "1.2.8", + "ms": "2.1.3", + "node-abort-controller": "3.1.1", + "node-fetch": "2.7.0", + "ora": "5.4.1", + "proxy": "2.2.0", + "rimraf": "6.0.1", + "semver": "7.7.2", + "split2": "4.2.0", + "stoppable": "1.1.0", + "tap": "21.1.0", + "ts-node": "10.9.2", + "ts-standard": "12.0.2", + "typescript": "5.9.2", + "workq": "3.0.0", + "xmlbuilder2": "3.1.1", + "zx": "8.8.0" + }, + "dependencies": { + "@elastic/transport": "^9.2.0", + "apache-arrow": "18.x - 21.x", + "tslib": "^2.4.0" }, - "tsd": { - "directory": "test/types" + "tap": { + "disable-coverage": true, + "files": [ + "test/unit/{*,**/*}.test.ts" + ] } } diff --git a/renovate.json b/renovate.json new file mode 100644 index 000000000..1ae05807b --- /dev/null +++ b/renovate.json @@ -0,0 +1,33 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "local>elastic/renovate-config", + "schedule:automergeMonthly", + "npm:unpublishSafe", + "mergeConfidence:all-badges" + ], + "schedule": [ + "* 0-3 1 * *" + ], + "prConcurrentLimit": 10, + "packageRules": [ + { + "matchManagers": [ + "dockerfile" + ], + "pinDigests": false + }, + { + "matchDatasources": [ + "docker" + ], + "pinDigests": false + }, + { + "matchDepNames": [ + "@types/**" + ], + "bumpVersion": "minor" + } + ] +} diff --git a/scripts/check-spdx b/scripts/check-spdx new file mode 100755 index 000000000..c60d600e8 --- /dev/null +++ b/scripts/check-spdx @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# Copyright Elasticsearch B.V. and contributors +# SPDX-License-Identifier: Apache-2.0 + +correct='/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */' + +the_exit=0 + +check_file() { + if $(diff <(head -n4 "$1") <(echo "$correct") &>/dev/null); then + echo "Correct: $1" + else + echo "Incorrect: $1" + the_exit=1 + fi +} + +echo "SPDX license header check" +for file in $(git ls-files | grep -E '\.(ts|js|mjs)$'); do + check_file "$file" +done + +exit "$the_exit" diff --git a/scripts/download-artifacts.js b/scripts/download-artifacts.js new file mode 100644 index 000000000..c15ed4ae1 --- /dev/null +++ b/scripts/download-artifacts.js @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +const { join } = require('path') +const stream = require('stream') +const { promisify } = require('util') +const { createWriteStream, promises } = require('fs') +const { rimraf } = require('rimraf') +const fetch = require('node-fetch') +const crossZip = require('cross-zip') +const ora = require('ora') + +const { mkdir, cp } = promises +const pipeline = promisify(stream.pipeline) +const unzip = promisify(crossZip.unzip) + +const testYamlFolder = join(__dirname, '..', 'yaml-rest-tests') +const zipFile = join(__dirname, '..', 'elasticsearch-clients-tests.zip') + +const schemaFolder = join(__dirname, '..', 'schema') +const schemaJson = join(schemaFolder, 'schema.json') + +async function downloadArtifacts (localTests, version = 'main') { + const log = ora('Checking out spec and test').start() + + const { GITHUB_TOKEN } = process.env + + if (version !== 'main') { + version = version.split('.').slice(0, 2).join('.') + } + + log.text = 'Clean tests folder' + await rimraf(testYamlFolder) + await mkdir(testYamlFolder, { recursive: true }) + + log.text = `Fetch test YAML files for version ${version}` + + if (localTests) { + log.text = `Copying local tests from ${localTests}` + await cp(localTests, testYamlFolder, { recursive: true }) + } else { + if (!GITHUB_TOKEN) { + log.fail("Missing required environment variable 'GITHUB_TOKEN'") + process.exit(1) + } + + const response = await fetch(`https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/${version}`, { + headers: { + Authorization: `Bearer ${GITHUB_TOKEN}`, + Accept: 'application/vnd.github+json' + } + }) + + if (!response.ok) { + log.fail(`unexpected response ${response.statusText}`) + process.exit(1) + } + + log.text = 'Downloading tests zipball' + await pipeline(response.body, createWriteStream(zipFile)) + + log.text = 'Unzipping tests' + await unzip(zipFile, testYamlFolder) + + log.text = 'Cleanup' + await rimraf(zipFile) + } + + log.text = 'Fetching Elasticsearch specification' + await rimraf(schemaFolder) + await mkdir(schemaFolder, { recursive: true }) + + const response = await fetch(`https://raw.githubusercontent.com/elastic/elasticsearch-specification/${version}/output/schema/schema.json`) + if (!response.ok) { + log.fail(`unexpected response ${response.statusText}`) + process.exit(1) + } + + log.text = 'Downloading schema.json' + await pipeline(response.body, createWriteStream(schemaJson)) + + log.succeed('Done') +} + +async function main () { + await downloadArtifacts() +} + +if (require.main === module) { + process.on('unhandledRejection', function (err) { + console.error(err) + process.exit(1) + }) + + main().catch(t => { + console.log(t) + process.exit(2) + }) +} + +module.exports = downloadArtifacts +module.exports.locations = { testYamlFolder, zipFile, schemaJson } diff --git a/scripts/es-docker-platinum.sh b/scripts/es-docker-platinum.sh deleted file 
mode 100755 index 930e124cc..000000000 --- a/scripts/es-docker-platinum.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash - -# Images are cached locally, it may be needed -# to delete an old image and download again -# the latest snapshot. - -repo=$(pwd) -testnodecrt="/.ci/certs/testnode.crt" -testnodekey="/.ci/certs/testnode.key" -cacrt="/.ci/certs/ca.crt" - -# pass `--clean` to remove the old snapshot -if [ "$1" == "--clean" ]; then - docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT') -fi - -# Create the 'elastic' network if it doesn't exist -exec docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null - -if [ "$1" == "--detach" ]; then - exec docker run \ - --rm \ - -e "node.attr.testattr=test" \ - -e "path.repo=/tmp" \ - -e "repositories.url.allowed_urls=http://snapshot.*" \ - -e "discovery.type=single-node" \ - -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \ - -e "ELASTIC_PASSWORD=changeme" \ - -e "xpack.security.enabled=true" \ - -e "xpack.license.self_generated.type=trial" \ - -e "xpack.security.http.ssl.enabled=true" \ - -e "xpack.security.http.ssl.verification_mode=certificate" \ - -e "xpack.security.http.ssl.key=certs/testnode.key" \ - -e "xpack.security.http.ssl.certificate=certs/testnode.crt" \ - -e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \ - -e "xpack.security.transport.ssl.enabled=true" \ - -e "xpack.security.transport.ssl.key=certs/testnode.key" \ - -e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \ - -e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \ - -v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \ - -v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \ - -v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \ - -p 9200:9200 \ - --detach \ - --network=elastic \ - --name=elasticsearch \ - docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT -else - exec docker run \ - --rm \ - -e "node.attr.testattr=test" \ - -e "path.repo=/tmp" \ - -e "repositories.url.allowed_urls=http://snapshot.*" \ - -e "discovery.type=single-node" \ - -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \ - -e "ELASTIC_PASSWORD=changeme" \ - -e "xpack.security.enabled=true" \ - -e "xpack.license.self_generated.type=trial" \ - -e "xpack.security.http.ssl.enabled=true" \ - -e "xpack.security.http.ssl.verification_mode=certificate" \ - -e "xpack.security.http.ssl.key=certs/testnode.key" \ - -e "xpack.security.http.ssl.certificate=certs/testnode.crt" \ - -e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \ - -e "xpack.security.transport.ssl.enabled=true" \ - -e "xpack.security.transport.ssl.key=certs/testnode.key" \ - -e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \ - -e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \ - -v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \ - -v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \ - -v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \ - -p 9200:9200 \ - --network=elastic \ - --name=elasticsearch \ - docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT -fi diff --git a/scripts/es-docker.sh b/scripts/es-docker.sh deleted file mode 100755 index 76677974e..000000000 --- a/scripts/es-docker.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Images are cached locally, it may be needed -# to delete an old image and download again -# the latest snapshot. - -# pass `--clean` to remove the old snapshot -if [ "$1" == "--clean" ]; then - docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT') -fi - -# Create the 'elastic' network if it doesn't exist -exec docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null - -if [ "$1" == "--detach" ]; then - exec docker run \ - --rm \ - -e "node.attr.testattr=test" \ - -e "path.repo=/tmp" \ - -e "repositories.url.allowed_urls=http://snapshot.*" \ - -e "discovery.type=single-node" \ - -p 9200:9200 \ - --detach \ - --network=elastic \ - --name=elasticsearch \ - docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT -else - exec docker run \ - --rm \ - -e "node.attr.testattr=test" \ - -e "path.repo=/tmp" \ - -e "repositories.url.allowed_urls=http://snapshot.*" \ - -e "discovery.type=single-node" \ - -p 9200:9200 \ - --network=elastic \ - --name=elasticsearch \ - docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT -fi diff --git a/scripts/generate-docs-examples.js b/scripts/generate-docs-examples.js index e14f8ccc9..8026547c3 100644 --- a/scripts/generate-docs-examples.js +++ b/scripts/generate-docs-examples.js @@ -1,131 +1,99 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/** - * To run this generator you must have the - * `alternatives_report.spec.json` placed in the root of this project. - * To get the `alternatives_report.spec.json` you must run the script - * to parse the original `alternatives_report.json`, which is not yet public - * and lives in github.com/elastic/clients-team/tree/master/scripts/docs-json-generator - * - * This script will remove the content of the `docs/doc_examples` folder and generate - * all the files present in the `enabledFiles` list below. - * You can run it with the following command: - * - * ```bash - * $ node scripts/generate-docs-examples.js - * ``` +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ const { join } = require('path') -const { writeFileSync } = require('fs') -const rimraf = require('rimraf') -const standard = require('standard') -const dedent = require('dedent') +const { writeFile } = require('fs/promises') +const fetch = require('node-fetch') +const { rimraf } = require('rimraf') +const ora = require('ora') +const { convertRequests } = require('@elastic/request-converter') +const minimist = require('minimist') const docsExamplesDir = join('docs', 'doc_examples') -const enabledFiles = [ - 'docs/delete.asciidoc', - 'docs/get.asciidoc', - 'docs/index_.asciidoc', - 'getting-started.asciidoc', - 'query-dsl/query-string-query.asciidoc', - 'query-dsl.asciidoc', - 'search/request-body.asciidoc', - 'setup/install/check-running.asciidoc', - 'mapping.asciidoc', - 'query-dsl/query_filter_context.asciidoc', - 'query-dsl/bool-query.asciidoc', - 'query-dsl/match-query.asciidoc', - 'indices/create-index.asciidoc', - 'docs/index_.asciidoc', - 'aggregations/bucket/terms-aggregation.asciidoc', - 'query-dsl/range-query.asciidoc', - 'search/search.asciidoc', - 'query-dsl/multi-match-query.asciidoc', - 'docs/bulk.asciidoc', - 'indices/delete-index.asciidoc', - 'indices/put-mapping.asciidoc', - 'query-dsl/match-all-query.asciidoc', - 'query-dsl/term-query.asciidoc', - 'docs/update.asciidoc', - 'docs/reindex.asciidoc', - 'indices/templates.asciidoc', - 'query-dsl/exists-query.asciidoc', - 'query-dsl/terms-query.asciidoc', - 'query-dsl/wildcard-query.asciidoc', - 'mapping/types/nested.asciidoc', - 'mapping/params/format.asciidoc', - 'docs/delete-by-query.asciidoc', - 'search/request/sort.asciidoc', - 'query-dsl/function-score-query.asciidoc', - 'query-dsl/nested-query.asciidoc', - 'query-dsl/regexp-query.asciidoc', - 'mapping/types/array.asciidoc', - 'mapping/types/date.asciidoc', - 'mapping/types/keyword.asciidoc', - 'mapping/params/fielddata.asciidoc', - 'cluster/health.asciidoc', - 'docs/bulk.asciidoc', - 'indices/aliases.asciidoc', - 'indices/update-settings.asciidoc', - 'search/request/from-size.asciidoc', - 'search/count.asciidoc' -] - -function generate () { - rimraf.sync(join(docsExamplesDir, '*')) - const examples = require(join(__dirname, '..', 'alternatives_report.spec.json')) - for (const example of examples) { - if (example.lang !== 'console') continue - if (!enabledFiles.includes(example.source_location.file)) continue - - const asciidoc = generateAsciidoc(example.parsed_source) - writeFileSync( - join(docsExamplesDir, `${example.digest}.asciidoc`), - asciidoc, - 'utf8' - ) +const log = ora('Generating example snippets') + +const failures = {} + +async function getAlternativesReport (version = 'master') { + const reportUrl = `https://raw.githubusercontent.com/elastic/built-docs/master/raw/en/elasticsearch/reference/${version}/alternatives_report.json` + const response = await fetch(reportUrl) + if (!response.ok) { + log.fail(`unexpected response ${response.statusText}`) + process.exit(1) } + return await response.json() } -function generateAsciidoc (source) { - var asciidoc = '// This file is autogenerated, DO NOT EDIT\n' - asciidoc += '// Use `node scripts/generate-docs-examples.js` to generate the docs examples\n\n' - var code = 'async function run (client) {\n// START\n' - - for (var i = 0; i < source.length; i++) { - const { api, query, params, body } = source[i] - const apiArguments = Object.assign({}, params, query, body ? { body } : body) - var serializedApiArguments = Object.keys(apiArguments).length > 0 - ? 
JSON.stringify(apiArguments, null, 2) - : '' - code += `const response${getResponsePostfix(i)} = await client.${api.replace(/_([a-z])/g, g => g[1].toUpperCase())}(${serializedApiArguments}) -console.log(response${getResponsePostfix(i)}) -\n` +async function makeSnippet (example) { + const { source, digest } = example + const fileName = `${digest}.asciidoc` + const filePath = join(docsExamplesDir, fileName) + + try { + const code = await convertRequests(source, 'javascript', { + complete: false, + printResponse: true + }) + await writeFile(filePath, asciidocWrapper(code), 'utf8') + } catch (err) { + failures[digest] = err.message } +} + +async function generate (version) { + log.start() - code += '// END\n}' - const { results } = standard.lintTextSync(code, { fix: true }) - code = results[0].output - code = code.slice(code.indexOf('// START\n') + 9, code.indexOf('\n\n// END')) + rimraf.sync(join(docsExamplesDir, '*')) - asciidoc += `[source, js] + log.text = `Downloading alternatives report for version ${version}` + const examples = await getAlternativesReport(version) + + let counter = 1 + for (const example of examples) { + log.text = `${counter++}/${examples.length}: ${example.digest}` + + // skip over bad request definitions + if (example.source.startsWith('{') || example.source.endsWith('...')) { + failures[example.digest] = 'Incomplete request syntax' + continue + } + + await makeSnippet(example) + } +} + +function asciidocWrapper (source) { + return `// This file is autogenerated, DO NOT EDIT +// Use \`node scripts/generate-docs-examples.js\` to generate the docs examples + +[source, js] ---- -${dedent(code)} +${source.trim()} ---- - ` - return asciidoc +} - function getResponsePostfix (i) { - if (source.length === 1) return '' - return String(i) +const options = minimist(process.argv.slice(2), { + boolean: ['debug'], + string: ['version'], + default: { + version: 'master' } -} +}) -generate() +generate(options.version) + .then(() => log.succeed('done!')) + .catch(err => log.fail(err.message)) + .finally(() => { + const keys = Object.keys(failures) + if (keys.length > 0 && options.debug) { + let message = 'Some examples failed to generate:\n\n' + for (const key of keys) { + message += `${key}: ${failures[key]}\n` + } + console.error(message) + } + }) diff --git a/scripts/generate.js b/scripts/generate.js deleted file mode 100644 index 3a68b28da..000000000 --- a/scripts/generate.js +++ /dev/null @@ -1,124 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -const { join } = require('path') -const { readdirSync, readFileSync, writeFileSync } = require('fs') -const minimist = require('minimist') -const semver = require('semver') -const ora = require('ora') -const rimraf = require('rimraf') -const standard = require('standard') -const { - generate, - cloneAndCheckout, - genFactory, - generateRequestTypes, - generateDocs -} = require('./utils') - -start(minimist(process.argv.slice(2), { - string: ['tag', 'branch'] -})) - -function start (opts) { - const log = ora('Loading Elasticsearch Repository').start() - if (opts.branch == null && semver.valid(opts.tag) === null) { - log.fail(`Missing or invalid tag: ${opts.tag}`) - return - } - const packageFolder = join(__dirname, '..', 'api') - const apiOutputFolder = join(packageFolder, 'api') - const mainOutputFile = join(packageFolder, 'index.js') - const typeDefFile = join(__dirname, '..', 'index.d.ts') - const docOutputFile = join(__dirname, '..', 'docs', 'reference.asciidoc') - const requestParamsOutputFile = join(packageFolder, 'requestParams.d.ts') - const allSpec = [] - - log.text = 'Cleaning API folder...' - rimraf.sync(join(apiOutputFolder, '*.js')) - - cloneAndCheckout({ log, tag: opts.tag, branch: opts.branch }, (err, { apiFolder, xPackFolder }) => { - if (err) { - log.fail(err.message) - return - } - - const apiFolderContents = readdirSync(apiFolder) - const xPackFolderContents = readdirSync(xPackFolder) - - apiFolderContents.forEach(generateApiFile(apiFolder, log)) - xPackFolderContents.forEach(generateApiFile(xPackFolder, log)) - - writeFileSync( - requestParamsOutputFile, - generateRequestTypes(opts.branch || opts.tag, allSpec), - { encoding: 'utf8' } - ) - - const { fn: factory, types } = genFactory(apiOutputFolder, [apiFolder, xPackFolder]) - writeFileSync( - mainOutputFile, - factory, - { encoding: 'utf8' } - ) - - const oldTypeDefString = readFileSync(typeDefFile, 'utf8') - const start = oldTypeDefString.indexOf('/* GENERATED */') - const end = oldTypeDefString.indexOf('/* /GENERATED */') - const newTypeDefString = oldTypeDefString.slice(0, start + 15) + '\n' + types + '\n ' + oldTypeDefString.slice(end) - writeFileSync( - typeDefFile, - newTypeDefString, - { encoding: 'utf8' } - ) - - lintFiles(log, () => { - log.text = 'Generating documentation' - const allSpec = apiFolderContents.filter(f => f !== '_common.json') - .map(f => require(join(apiFolder, f))) - .concat(xPackFolderContents.map(f => require(join(xPackFolder, f)))) - writeFileSync( - docOutputFile, - generateDocs(require(join(apiFolder, '_common.json')), allSpec), - { encoding: 'utf8' } - ) - - log.succeed('Done!') - }) - }) - - function generateApiFile (apiFolder, log) { - var common = null - try { - common = require(join(apiFolder, '_common.json')) - } catch (e) {} - - return function _generateApiFile (file) { - if (file === '_common.json') return - log.text = `Processing ${file}` - - const spec = require(join(apiFolder, file)) - // const { stability } = spec[Object.keys(spec)[0]] - // if (stability !== 'stable') return - allSpec.push(spec) - const code = generate(opts.branch || opts.tag, spec, common) - const filePath = join(apiOutputFolder, `${file.slice(0, file.lastIndexOf('.'))}.js`) - - writeFileSync(filePath, code, { encoding: 'utf8' }) - } - } - - function lintFiles (log, cb) { - log.text = 'Linting...' 
- const files = [join(packageFolder, '*.js'), join(apiOutputFolder, '*.js')] - standard.lintFiles(files, { fix: true }, err => { - if (err) { - return log.fail(err.message) - } - cb() - }) - } -} diff --git a/scripts/kibana-docker.sh b/scripts/kibana-docker.sh deleted file mode 100755 index 8c39f9647..000000000 --- a/scripts/kibana-docker.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -exec docker run \ - --rm \ - -e ELASTICSEARCH_URL="http://elasticsearch:9200/" \ - -p 5601:5601 \ - --network=elastic \ - docker.elastic.co/kibana/kibana:7.0.0-beta1 diff --git a/scripts/release-canary.js b/scripts/release-canary.js new file mode 100644 index 000000000..a4bd8780f --- /dev/null +++ b/scripts/release-canary.js @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +/** + * Script for releasing the canary client to npm. + * It should be executed from the top level directory of the repository. + * + * Usage: + * node scripts/release-canary.js --otp + * + * You can reset the canary count via the `--reset` option + * node scripts/release-canary.js --otp --reset + * + * You can also do a dry run with the `--dry-run` option + * node scripts/release-canary.js --otp --dry-run + */ + +const readline = require('readline') +const assert = require('assert') +const { execSync } = require('child_process') +const { writeFile, readFile } = require('fs').promises +const { join } = require('path') +const minimist = require('minimist') +const chalk = require('chalk') + +const helpMessage = `usage: node scripts/release-canary.js [options] + + --otp One-time password (required) + --reset Reset the canary version to 1 + --dry-run Run everything but don't actually publish + -h, --help Show this help message` + +async function release (opts) { + if (opts.help) { + console.log(helpMessage) + process.exit(0) + } + + assert(process.cwd() !== __dirname, 'You should run the script from the top level directory of the repository') + if (!opts['dry-run']) { + assert(typeof opts.otp === 'string', 'Missing OTP') + } + + const packageJson = JSON.parse(await readFile(join(__dirname, '..', 'package.json'), 'utf8')) + const originalName = packageJson.name + const originalVersion = packageJson.version + const currentCanaryVersion = packageJson.versionCanary + const originalTypes = packageJson.types + + const newCanaryInteger = opts.reset ? 1 : (Number(currentCanaryVersion.split('-')[1].split('.')[1]) + 1) + const newCanaryVersion = `${originalVersion.split('-')[0]}-canary.${newCanaryInteger}` + + // Update the package.json with the correct name and new version + packageJson.name = '@elastic/elasticsearch-canary' + packageJson.version = newCanaryVersion + packageJson.versionCanary = newCanaryVersion + packageJson.commitHash = execSync('git log -1 --pretty=format:%h').toString() + + // update the package.json + await writeFile( + join(__dirname, '..', 'package.json'), + JSON.stringify(packageJson, null, 2) + '\n', + 'utf8' + ) + + // confirm the package.json changes with the user + const diff = execSync('git diff').toString().split('\n').map(colorDiff).join('\n') + console.log(diff) + const answer = await confirm() + + // release on npm with provided otp + if (answer) { + execSync(`npm publish --otp ${opts.otp} ${opts['dry-run'] ? 
'--dry-run' : ''}`, { stdio: 'inherit' }) + } else { + // the changes were not good, restore the previous canary version + packageJson.versionCanary = currentCanaryVersion + } + + // restore the package.json to the original values + packageJson.name = originalName + packageJson.version = originalVersion + packageJson.types = originalTypes + delete packageJson.commitHash + + await writeFile( + join(__dirname, '..', 'package.json'), + JSON.stringify(packageJson, null, 2) + '\n', + 'utf8' + ) +} + +function confirm () { + return new Promise((resolve) => { + const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout + }) + + rl.question('Does it look good? (y/n) ', (answer) => { + resolve(answer === 'y') + rl.close() + }) + }) +} + +function colorDiff (line) { + if (line.startsWith('+')) { + return chalk.green(line) + } else if (line.startsWith('-')) { + return chalk.red(line) + } else { + return line + } +} + +release( + minimist(process.argv.slice(2), { + unknown (option) { + console.log(`Unrecognized option: ${option}`) + process.exit(1) + }, + string: [ + // The otp code for publishing the package + 'otp' + ], + boolean: [ + // Reset the canary version to '1' + 'reset', + + // run all the steps but don't publish + 'dry-run', + + // help text + 'help' + ], + alias: { help: 'h' } + }) +) + .catch(err => { + console.log(err) + console.log('\n' + helpMessage) + process.exit(1) + }) diff --git a/scripts/utils/clone-es.js b/scripts/utils/clone-es.js deleted file mode 100644 index d412ccc98..000000000 --- a/scripts/utils/clone-es.js +++ /dev/null @@ -1,124 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const { accessSync, mkdirSync } = require('fs') - const { join } = require('path') - const Git = require('simple-git') - -const esRepo = 'https://github.com/elastic/elasticsearch.git' -const esFolder = join(__dirname, '..', '..', 'elasticsearch') -const apiFolder = join(esFolder, 'rest-api-spec', 'src', 'main', 'resources', 'rest-api-spec', 'api') -const xPackFolder = join(esFolder, 'x-pack', 'plugin', 'src', 'test', 'resources', 'rest-api-spec', 'api') - -function cloneAndCheckout (opts, callback) { - const { log, tag, branch } = opts - withTag(tag, callback) - - /** - * Sets the elasticsearch repository to the given tag. - * If the repository is not present in `esFolder` it will - * clone the repository and then checkout the tag. - * If the repository is already present but it cannot checkout to - * the given tag, it will perform a pull and then try again. - * @param {string} tag - * @param {function} callback - */ - function withTag (tag, callback) { - var fresh = false - var retry = 0 - - if (!pathExist(esFolder)) { - if (!createFolder(esFolder)) { - log.fail('Failed folder creation') - return - } - fresh = true - } - - const git = Git(esFolder) - - if (fresh) { - clone(checkout) - } else if (opts.branch) { - checkout(true) - } else { - checkout() - } - - function checkout (alsoPull = false) { - if (branch) { - log.text = `Checking out branch '${branch}'` - } else { - log.text = `Checking out tag '${tag}'` - } - git.checkout(branch || tag, err => { - if (err) { - if (retry++ > 0) { - callback(new Error(`Cannot checkout tag '${tag}'`), { apiFolder, xPackFolder }) - return - } - return pull(checkout) - } - if (alsoPull) { - return pull(checkout) - } - callback(null, { apiFolder, xPackFolder }) - }) - } - - function pull (cb) { - log.text = 'Pulling elasticsearch repository...' - git.pull(err => { - if (err) { - callback(err, { apiFolder, xPackFolder }) - return - } - cb() - }) - } - - function clone (cb) { - log.text = 'Cloning elasticsearch repository...' - git.clone(esRepo, esFolder, err => { - if (err) { - callback(err, { apiFolder, xPackFolder }) - return - } - cb() - }) - } - } - - /** - * Checks if the given path exists - * @param {string} path - * @returns {boolean} true if exists, false if not - */ - function pathExist (path) { - try { - accessSync(path) - return true - } catch (err) { - return false - } - } - - /** - * Creates the given folder - * @param {string} name - * @returns {boolean} true on success, false on failure - */ - function createFolder (name) { - try { - mkdirSync(name) - return true - } catch (err) { - return false - } - } -} - -module.exports = cloneAndCheckout diff --git a/scripts/utils/generateApis.js b/scripts/utils/generateApis.js deleted file mode 100644 index 5a550f314..000000000 --- a/scripts/utils/generateApis.js +++ /dev/null @@ -1,546 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -/* eslint camelcase: 0 */ - -'use strict' - -const dedent = require('dedent') -const semver = require('semver') -const allowedMethods = { - noBody: ['GET', 'HEAD', 'DELETE'], - body: ['POST', 'PUT', 'DELETE'] -} - -// if a parameter is deprecated in a minor release -// we should be able to support it until the next major -const deprecatedParameters = require('./patch.json') - -// list of APIs that do not need any kind of validation -// because of how the url is built or the `type` handling in ES7 -const noPathValidation = [ - 'create', - 'exists', - 'explain', - 'get', - 'get_source', - 'index', - 'indices.get_alias', - 'indices.exists_alias', - 'indices.get_field_mapping', - 'indices.get_mapping', - 'indices.get_settings', - 'indices.put_mapping', - 'indices.stats', - 'delete', - 'nodes.info', - 'nodes.stats', - 'nodes.usage', - 'tasks.cancel', - 'termvectors', - 'update' -] - -// APIs that use the bulkBody property -const ndjsonApi = [ - 'bulk', - 'msearch', - 'msearch_template', - 'ml.find_file_structure', - 'monitoring.bulk', - 'xpack.ml.find_file_structure', - 'xpack.monitoring.bulk' -] - -function generate (version, spec, common) { - const release = semver.valid(version) ? 
semver.major(version) : version - const api = Object.keys(spec)[0] - const name = api - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - const { paths } = spec[api].url - const { params } = spec[api] - const acceptedQuerystring = [] - const required = [] - - const methods = paths.reduce((acc, val) => { - for (const method of val.methods) { - if (!acc.includes(method)) acc.push(method) - } - return acc - }, []) - const parts = paths.reduce((acc, val) => { - if (!val.parts) return acc - for (const part of Object.keys(val.parts)) { - if (!acc.includes(part)) acc.push(part) - } - return acc - }, []) - - // get the required parts from the url - // if the url has at least one static path, - // then there are not required parts of the url - var allParts = [] - for (const path of paths) { - if (path.parts) { - allParts.push(Object.keys(path.parts)) - } else { - allParts = [] - break - } - } - if (allParts.length > 0) { - intersect(...allParts).forEach(r => required.push(r)) - } - - for (const key in params) { - if (params[key].required) { - required.push(key) - } - - acceptedQuerystring.push(key) - if (deprecatedParameters[release] && deprecatedParameters[release][key]) { - acceptedQuerystring.push(deprecatedParameters[release][key]) - } - } - - for (const key in spec[api]) { - const k = spec[api][key] - if (k && k.required) { - required.push(key) - } - } - if (common && common.params) { - for (const key in common.params) { - acceptedQuerystring.push(key) - } - } - - const code = ` - function ${safeWords(name)} (params, options, callback) { - options = options || {} - if (typeof options === 'function') { - callback = options - options = {} - } - if (typeof params === 'function' || params == null) { - callback = params - params = {} - options = {} - } - - ${genRequiredChecks()} - - ${genUrlValidation(paths, api)} - - // validate headers object - if (options.headers != null && typeof options.headers !== 'object') { - const err = new ConfigurationError(\`Headers should be an object, instead got: \${typeof options.headers}\`) - return handleError(err, callback) - } - - var warnings = [] - var { ${genQueryBlacklist(false)}, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring, warnings) - - var ignore = options.ignore - if (typeof ignore === 'number') { - options.ignore = [ignore] - } - - - var path = '' - ${buildPath(api)} - - // build request object - const request = { - method, - path, - ${genBody(api, methods, spec[api].body)} - querystring - } - - options.warnings = warnings.length === 0 ? null : warnings - return makeRequest(request, options, callback) - } - `.trim() // always call trim to avoid newlines - - const fn = dedent` - // Licensed to Elasticsearch B.V under one or more agreements. - // Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
- // See the LICENSE file in the project root for more information - - 'use strict' - - /* eslint camelcase: 0 */ - /* eslint no-unused-vars: 0 */ - - function build${name[0].toUpperCase() + name.slice(1)} (opts) { - // eslint-disable-next-line no-unused-vars - const { makeRequest, ConfigurationError, handleError, snakeCaseKeys } = opts - - const acceptedQuerystring = [ - ${acceptedQuerystring.map(q => `'${q}'`).join(',\n')} - ] - - const snakeCase = { - ${genSnakeCaseMap()} - } - - ${generateDocumentation(spec[api], api)} - return ${code} - } - - module.exports = build${name[0].toUpperCase() + name.slice(1)} -` - - // new line at the end of file - return fn + '\n' - - function genRequiredChecks (param) { - const code = required - .map(_genRequiredCheck) - .concat(_noBody()) - .filter(Boolean) - - if (code.length) { - code.unshift('// check required parameters') - } - - return code.join('\n ') - - function _genRequiredCheck (param) { - var camelCased = param[0] === '_' - ? '_' + param.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : param.replace(/_([a-z])/g, k => k[1].toUpperCase()) - - if (param === camelCased) { - const check = ` - if (params['${param}'] == null) { - const err = new ConfigurationError('Missing required parameter: ${param}') - return handleError(err, callback) - } - ` - return check.trim() - } else { - const check = ` - if (params['${param}'] == null && params['${camelCased}'] == null) { - const err = new ConfigurationError('Missing required parameter: ${param} or ${camelCased}') - return handleError(err, callback) - } - ` - return check.trim() - } - } - - function _noBody () { - const check = ` - if (params.body != null) { - const err = new ConfigurationError('This API does not require a body') - return handleError(err, callback) - } - ` - return spec[api].body === null ? check.trim() : '' - } - } - - function genSnakeCaseMap () { - const toCamelCase = str => { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - return acceptedQuerystring.reduce((acc, val, index) => { - if (toCamelCase(val) !== val) { - acc += `${toCamelCase(val)}: '${val}'` - if (index !== acceptedQuerystring.length - 1) { - acc += ',\n' - } - } - return acc - }, '') - } - - function genQueryBlacklist (addQuotes = true) { - const toCamelCase = str => { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - const blacklist = ['method', 'body'] - parts.forEach(p => { - const camelStr = toCamelCase(p) - if (camelStr !== p) blacklist.push(`${camelStr}`) - blacklist.push(`${p}`) - }) - return addQuotes ? blacklist.map(q => `'${q}'`) : blacklist - } - - function buildPath () { - const toCamelCase = str => { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - const genAccessKey = str => { - const camelStr = toCamelCase(str) - return camelStr === str - ? str - : `${str} || ${camelStr}` - } - - const genCheck = path => { - return path - .split('/') - .filter(Boolean) - .map(p => p.startsWith('{') ? `(${genAccessKey(p.slice(1, -1))}) != null` : false) - .filter(Boolean) - .join(' && ') - } - - const genPath = path => { - path = path - .split('/') - .filter(Boolean) - .map(p => p.startsWith('{') ? 
`encodeURIComponent(${genAccessKey(p.slice(1, -1))})` : `'${p}'`) - .join(' + \'/\' + ') - return path.length > 0 ? ('\'/\' + ' + path) : '\'/\'' - } - - var hasStaticPath = false - const sortedPaths = paths - // some legacy APIs have multiple static paths - // this filter removes them - .filter(p => { - if (p.path.includes('{')) return true - if (hasStaticPath === false && p.deprecated == null) { - hasStaticPath = true - return true - } - return false - }) - // sort by number of parameters (desc) - .sort((a, b) => Object.keys(b.parts || {}).length - Object.keys(a.parts || {}).length) - - var code = '' - for (var i = 0; i < sortedPaths.length; i++) { - const { path, methods } = sortedPaths[i] - if (sortedPaths.length === 1) { - code += ` - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - ` - } else if (i === 0) { - code += ` - if (${genCheck(path)}) { - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - } - ` - } else if (i === sortedPaths.length - 1) { - code += ` else { - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - } - ` - } else { - code += ` else if (${genCheck(path)}) { - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - } - ` - } - } - - // var hasStaticPath = false - // var singlePathComponent = false - // paths - // .filter(path => { - // if (path.indexOf('{') > -1) return true - // if (hasStaticPath === false) { - // hasStaticPath = true - // return true - // } - // return false - // }) - // .sort((a, b) => (b.split('{').length + b.split('/').length) - (a.split('{').length + a.split('/').length)) - // .forEach((path, index, arr) => { - // if (arr.length === 1) { - // singlePathComponent = true - // code += ` - // path = ${genPath(path)} - // ` - // } else if (index === 0) { - // code += ` - // if (${genCheck(path)}) { - // path = ${genPath(path)} - // ` - // } else if (index === arr.length - 1) { - // code += ` - // } else { - // path = ${genPath(path)} - // ` - // } else { - // code += ` - // } else if (${genCheck(path)}) { - // path = ${genPath(path)} - // ` - // } - // }) - - // code += singlePathComponent ? '' : '}' - return code - } -} - -function safeWords (str) { - switch (str) { - // delete is a reserved word - case 'delete': - return '_delete' - // index is also a parameter - case 'index': - return '_index' - default: - return str - } -} - -function generatePickMethod (methods) { - if (methods.length === 1) { - return `'${methods[0]}'` - } - const bodyMethod = getBodyMethod(methods) - const noBodyMethod = getNoBodyMethod(methods) - if (bodyMethod && noBodyMethod) { - return `body == null ? 
'${noBodyMethod}' : '${bodyMethod}'` - } else if (bodyMethod) { - return `'${bodyMethod}'` - } else { - return `'${noBodyMethod}'` - } -} - -function genBody (api, methods, body) { - const bodyMethod = getBodyMethod(methods) - if (ndjsonApi.indexOf(api) > -1) { - return 'bulkBody: body,' - } - if (body === null && bodyMethod) { - return `body: '',` - } else if (bodyMethod) { - return `body: body || '',` - } else { - return 'body: null,' - } -} - -function getBodyMethod (methods) { - const m = methods.filter(m => ~allowedMethods.body.indexOf(m)) - if (m.length) return m[0] - return null -} - -function getNoBodyMethod (methods) { - const m = methods.filter(m => ~allowedMethods.noBody.indexOf(m)) - if (m.length) return m[0] - return null -} - -function genUrlValidation (paths, api) { - // this api does not need url validation - if (!needsPathValidation(api)) return '' - // gets only the dynamic components of the url in an array - // then we reverse it. A parameter always requires what is - // at its right in the array. - const chunks = paths - .sort((a, b) => Object.keys(a.parts || {}).length > Object.keys(b.parts || {}).length ? -1 : 1) - .slice(0, 1) - .reduce((acc, val) => val.path, '') - // .reduce((a, b) => a.path.split('/').length > b.path.split('/').length ? a.path : b.path) - .split('/') - .filter(s => s.startsWith('{')) - .map(s => s.slice(1, -1)) - .reverse() - - var code = '' - - const len = chunks.length - chunks.forEach((chunk, index) => { - if (index === len - 1) return - var params = [] - var camelCased = chunk[0] === '_' - ? '_' + chunk.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : chunk.replace(/_([a-z])/g, k => k[1].toUpperCase()) - - if (chunk === camelCased) { - code += `${index ? '} else ' : ''}if (params['${chunk}'] != null && (` - } else { - code += `${index ? '} else ' : ''}if ((params['${chunk}'] != null || params['${camelCased}'] != null) && (` - } - for (var i = index + 1; i < len; i++) { - params.push(chunks[i]) - // url parts can be declared in camelCase fashion - camelCased = chunks[i][0] === '_' - ? '_' + chunks[i].slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : chunks[i].replace(/_([a-z])/g, k => k[1].toUpperCase()) - - if (chunks[i] === camelCased) { - code += `params['${chunks[i]}'] == null${i === len - 1 ? '' : ' || '}` - } else { - code += `(params['${chunks[i]}'] == null && params['${camelCased}'] == null)${i === len - 1 ? 
'' : ' || '}` - } - } - code += `)) { - const err = new ConfigurationError('Missing required parameter of the url: ${params.join(', ')}') - return handleError(err, callback) - ` - }) - - if (chunks.length > 1) { - code += '\n}' - } - - if (code.length) { - code = '// check required url components\n' + code - } - - return code.trim() -} - -function generateDocumentation ({ documentation }, op) { - // we use `replace(/\u00A0/g, ' ')` to remove no breaking spaces - // because some parts of the description fields are using it - - if (documentation == null) return '' - - var doc = '/**\n' - doc += ` * Perform a ${op} request\n` - if (documentation.description) { - doc += ` * ${documentation.description.replace(/\u00A0/g, ' ')}\n` - } - if (documentation.url) { - doc += ` * ${documentation.url}\n` - } - doc += ' */' - - return doc -} - -function needsPathValidation (api) { - return noPathValidation.indexOf(api) === -1 -} - -function intersect (first, ...rest) { - return rest.reduce((accum, current) => { - return accum.filter(x => current.indexOf(x) !== -1) - }, first) -} - -module.exports = generate -module.exports.ndjsonApi = ndjsonApi diff --git a/scripts/utils/generateDocs.js b/scripts/utils/generateDocs.js deleted file mode 100644 index 6cb886f5b..000000000 --- a/scripts/utils/generateDocs.js +++ /dev/null @@ -1,301 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const { readdirSync } = require('fs') -const { join } = require('path') -const dedent = require('dedent') - -const codeExamples = readdirSync(join(__dirname, '..', '..', 'docs', 'examples')) - .map(file => file.slice(0, -9)) - .filter(api => api !== 'index') - -function generateDocs (common, spec) { - var doc = dedent` - [[api-reference]] - - //////// - - - - =========================================================================================================================== - || || - || || - || || - || ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || - || ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || - || ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || - || ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || - || ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || - || ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || - || || - || || - || This file is autogenerated, DO NOT send pull requests that changes this file directly. || - || You should update the script that does the generation, which can be found in '/scripts/utils/generateDocs.js'. || - || || - || You can run the script with the following command: || - || node scripts/generate --branch || - || or || - || node scripts/generate --tag || - || || - || || - || || - =========================================================================================================================== - - - - //////// - - == API Reference - - This document contains the entire list of the Elasticsearch API supported by the client, both OSS and commercial. The client is entirely licensed under Apache 2.0. - - Elasticsearch exposes an HTTP layer to communicate with, and the client is a library that will help you do this. Because of this reason, you will see HTTP related parameters, such as ${'`'}body${'`'} or ${'`'}headers${'`'}. 
- - Every API can accept two objects, the first contains all the parameters that will be sent to Elasticsearch, while the second includes the request specific parameters, such as timeouts, headers, and so on. - In the first object, every parameter but the body will be sent via querystring or url parameter, depending on the API, and every unrecognized parameter will be sent as querystring. - - [source,js] - ---- - // promise API - const result = await client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } - }, { - ignore: [404], - maxRetries: 3 - }) - - // callback API - client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } - }, { - ignore: [404], - maxRetries: 3 - }, (err, result) => { - if (err) console.log(err) - }) - ---- - - In this document, you will find the reference of every parameter accepted by the querystring or the url. If you also need to send the body, you can find the documentation of its format in the reference link that is present along with every endpoint. - - \n\n` - doc += commonParameters(common) - spec.forEach(s => { - doc += '\n' + generateApiDoc(s) - }) - return doc -} - -function commonParameters (spec) { - var doc = dedent` - === Common parameters - Parameters that are accepted by all API endpoints. - - link:{ref}/common-options.html[Documentation] - [cols=2*] - |===\n` - Object.keys(spec.params).forEach(key => { - const name = isSnakeCased(key) && key !== camelify(key) - ? '`' + key + '` or `' + camelify(key) + '`' - : '`' + key + '`' - - doc += dedent` - |${name} - |${'`' + spec.params[key].type + '`'} - ${spec.params[key].description}` - if (spec.params[key].default) { - doc += ` + - _Default:_ ${'`' + spec.params[key].default + '`'}` - } - doc += '\n\n' - }) - - doc += dedent` - |=== - ` - return doc -} - -function generateApiDoc (spec) { - const name = Object.keys(spec)[0] - const documentationUrl = spec[name].documentation && spec[name].documentation.url - ? fixLink(name, spec[name].documentation.url) - : '' - const params = [] - // url params - const urlParts = spec[name].url.paths.reduce((acc, path) => { - if (!path.parts) return acc - for (const part in path.parts) { - if (acc[part] != null) continue - acc[part] = path.parts[part] - } - return acc - }, {}) - if (urlParts) { - Object.keys(urlParts).forEach(param => { - params.push({ - name: param, - type: getType(urlParts[param].type, urlParts[param].options), - description: urlParts[param].description, - default: urlParts[param].default, - deprecated: !!urlParts[param].deprecated - }) - }) - } - - // query params - const urlParams = spec[name].params - if (urlParams) { - Object.keys(urlParams).forEach(param => { - const duplicate = params.find(ele => ele.name === param) - if (duplicate) return - params.push({ - name: param, - type: getType(urlParams[param].type, urlParams[param].options), - description: urlParams[param].description, - default: urlParams[param].default, - deprecated: !!urlParams[param].deprecated - }) - }) - } - - // body params - const body = spec[name].body - if (body) { - params.push({ - name: 'body', - type: 'object', - description: body.description, - default: body.default, - deprecated: !!body.deprecated - }) - } - - const codeParameters = params - .reduce((acc, val) => { - var code = `${val.name}: ${val.type},` - acc += acc === '' - ? code - : '\n ' + code - - return acc - }, '') - // remove last comma - .slice(0, -1) - - const stability = spec[name].stability === 'stable' - ? 
'' - : `*Stability:* ${spec[name].stability}` - - var doc = dedent` - === ${camelify(name)} - ${stability} - [source,ts] - ---- - client.${camelify(name)}(${codeParameters.length > 0 ? `{\n ${codeParameters}\n}` : ''}) - ----\n` - if (documentationUrl) { - doc += `link:${documentationUrl}[Documentation] +\n` - } - if (codeExamples.includes(name)) { - doc += `{jsclient}/${name.replace(/\./g, '_')}_examples.html[Code Example] +\n` - } - - if (params.length !== 0) { - doc += dedent`[cols=2*] - |===\n` - doc += params.reduce((acc, val) => { - const name = isSnakeCased(val.name) && val.name !== camelify(val.name) - ? '`' + val.name + '` or `' + camelify(val.name) + '`' - : '`' + val.name + '`' - acc += dedent` - |${name} - |${'`' + val.type.replace(/\|/g, '\\|') + '`'} - ${val.description}` - if (val.default) { - acc += ` +\n_Default:_ ${'`' + val.default + '`'}` - } - if (val.deprecated) { - acc += ` +\n\nWARNING: This parameter has been deprecated.` - } - return acc + '\n\n' - }, '') - - doc += dedent` - |=== - ` - } - doc += '\n' - return doc -} - -const LINK_OVERRIDES = { - 'license.delete': '{ref}/delete-license.html', - 'license.get': '{ref}/get-license.html', - 'license.get_basic_status': '{ref}/get-basic-status.html', - 'license.get_trial_status': '{ref}/get-trial-status.html', - 'license.post': '{ref}/update-license.html', - 'license.post_start_basic': '{ref}/start-basic.html', - 'license.post_start_trial': '{ref}/start-trial.html', - 'migration.deprecations': '{ref}/migration-api-deprecation.html', - 'monitoring.bulk': '{ref}/monitor-elasticsearch-cluster.html', - 'ingest.delete_pipeline': '{ref}/delete-pipeline-api.html', - 'ingest.get_pipeline': '{ref}/get-pipeline-api.html', - 'ingest.put_pipeline': '{ref}/put-pipeline-api.html', - 'ingest.simulate': '{ref}/simulate-pipeline-api.html', - 'ingest.processor_grok': '{ref}/grok-processor.html#grok-processor-rest-get' -} -// Fixes bad urls in the JSON spec -function fixLink (name, str) { - /* In 6.x some API start with `xpack.` when in master they do not. We - * can safely ignore that for link generation. */ - name = name.replace(/^xpack\./, '') - const override = LINK_OVERRIDES[name] - if (override) return override - if (!str) return '' - /* Replace references to the guide with the attribute {ref} because - * the json files in the Elasticsearch repo are a bit of a mess. */ - str = str.replace(/^.+guide\/en\/elasticsearch\/reference\/[^/]+\/([^./]*\.html(?:#.+)?)$/, '{ref}/$1') - str = str.replace(/frozen\.html/, 'freeze-index-api.html') - str = str.replace(/ml-file-structure\.html/, 'ml-find-file-structure.html') - str = str.replace(/security-api-get-user-privileges\.html/, 'security-api-get-privileges.html') - - return str -} - -function getType (type, options) { - switch (type) { - case 'list': - return 'string | string[]' - case 'date': - case 'time': - case 'timeout': - return 'string' - case 'enum': - return options.map(k => `'${k}'`).join(' | ') - case 'int': - case 'double': - case 'long': - return 'number' - default: - return type - } -} - -function camelify (str) { - return str[0] === '_' - ? 
'_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) -} - -function isSnakeCased (str) { - return !!~str.indexOf('_') -} - -module.exports = generateDocs diff --git a/scripts/utils/generateMain.js b/scripts/utils/generateMain.js deleted file mode 100644 index f0361c9ad..000000000 --- a/scripts/utils/generateMain.js +++ /dev/null @@ -1,237 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -/* eslint-disable no-template-curly-in-string */ - -'use strict' - -const { readdirSync } = require('fs') -const { join } = require('path') -const dedent = require('dedent') -const deepmerge = require('deepmerge') -const { ndjsonApi } = require('./generateApis') - -const ndjsonApiKey = ndjsonApi - .map(api => { - return api - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - }) - .map(toPascalCase) - -function genFactory (folder, paths) { - // get all the API files - const apiFiles = readdirSync(folder) - const types = apiFiles - .map(file => { - const name = file - .slice(0, -3) - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - return file - .slice(0, -3) // remove `.js` extension - .split('.') - .reverse() - .reduce((acc, val) => { - const body = hasBody(paths, file.slice(0, -3)) - const methods = acc === null ? buildMethodDefinition(val, name, body) : null - const obj = {} - if (methods) { - for (const m of methods) { - obj[m.key] = m.val - } - } else { - obj[val] = acc - if (isSnakeCased(val)) { - obj[camelify(val)] = acc - } - } - return obj - }, null) - }) - .reduce((acc, val) => deepmerge(acc, val), {}) - - const apis = apiFiles - .map(file => { - // const name = format(file.slice(0, -3)) - return file - .slice(0, -3) // remove `.js` extension - .split('.') - .reverse() - .reduce((acc, val) => { - const obj = { - [val]: acc === null - ? `lazyLoad('${file.slice(0, -3)}', opts)` // `${name}(opts)` - : acc - } - if (isSnakeCased(val)) { - obj[camelify(val)] = acc === null - ? `lazyLoad('${file.slice(0, -3)}', opts)` // `${name}(opts)` - : acc - } - return obj - }, null) - }) - .reduce((acc, val) => deepmerge(acc, val), {}) - - // serialize the API object - const apisStr = JSON.stringify(apis, null, 2) - // split & join to fix the indentation - .split('\n') - .join('\n ') - // remove useless quotes - .replace(/"/g, '') - - // serialize the type object - const typesStr = Object.keys(types) - .map(key => { - const line = ` ${key}: ${JSON.stringify(types[key], null, 4)}` - if (line.slice(-1) === '}') { - return line.slice(0, -1) + ' }' - } - return line - }) - .join('\n') - // remove useless quotes and commas - .replace(/"/g, '') - .replace(/,$/gm, '') - - const fn = dedent` - // Licensed to Elasticsearch B.V under one or more agreements. - // Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
- // See the LICENSE file in the project root for more information - - 'use strict' - - const assert = require('assert') - - function ESAPI (opts) { - assert(opts.makeRequest, 'Missing makeRequest function') - assert(opts.ConfigurationError, 'Missing ConfigurationError class') - assert(opts.result, 'Missing default result object') - - const { result } = opts - opts.handleError = handleError - opts.snakeCaseKeys = snakeCaseKeys - - const apis = ${apisStr} - - - return apis - - function handleError(err, callback) { - if (callback) return callback(err, result) - return Promise.reject(err) - } - - function snakeCaseKeys (acceptedQuerystring, snakeCase, querystring, warnings) { - var target = {} - var keys = Object.keys(querystring) - for (var i = 0, len = keys.length; i < len; i++) { - var key = keys[i] - target[snakeCase[key] || key] = querystring[key] - if (acceptedQuerystring.indexOf(snakeCase[key] || key) === -1) { - warnings.push('Client - Unknown parameter: "' + key + '", sending it as query parameter') - } - } - return target - } - } - - // It's unlikely that a user needs all of our APIs, - // and since require is a sync operation that takes time - // (given the amount of APIs we have), let's lazy load them, - // so a given API file will be required only - // if the user actually needs that API. - // The following implementation takes advantage - // of js closures to have a simple cache with the least overhead. - function lazyLoad (file, opts) { - var fn = null - return function _lazyLoad (params, options, callback) { - if (fn === null) { - fn = require(${'`./api/${file}.js`'})(opts) - } - return fn(params, options, callback) - } - } - - module.exports = ESAPI - ` - - // new line at the end of file - return { fn: fn + '\n', types: typesStr } -} - -// from snake_case to camelCase -function camelify (str) { - return str.replace(/_([a-z])/g, k => k[1].toUpperCase()) -} - -function isSnakeCased (str) { - return !!~str.indexOf('_') -} - -function toPascalCase (str) { - return str[0].toUpperCase() + str.slice(1) -} - -function buildMethodDefinition (api, name, hasBody) { - const Name = toPascalCase(name) - const bodyType = ndjsonApiKey.includes(Name) ? 'RequestNDBody' : 'RequestBody' - const defaultBodyType = ndjsonApiKey.includes(Name) ? 
'Record[]' : 'Record' - - if (hasBody) { - let methods = [ - { key: `${api}, TRequestBody extends ${bodyType} = ${defaultBodyType}, TContext = unknown>(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: `TransportRequestPromise>` }, - { key: `${api}, TRequestBody extends ${bodyType} = ${defaultBodyType}, TContext = unknown>(callback: callbackFn)`, val: `TransportRequestCallback` }, - { key: `${api}, TRequestBody extends ${bodyType} = ${defaultBodyType}, TContext = unknown>(params: RequestParams.${Name}, callback: callbackFn)`, val: `TransportRequestCallback` }, - { key: `${api}, TRequestBody extends ${bodyType} = ${defaultBodyType}, TContext = unknown>(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: `TransportRequestCallback` } - ] - if (isSnakeCased(api)) { - methods = methods.concat([ - { key: `${camelify(api)}, TRequestBody extends ${bodyType} = ${defaultBodyType}, TContext = unknown>(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: `TransportRequestPromise>` }, - { key: `${camelify(api)}, TRequestBody extends ${bodyType} = ${defaultBodyType}, TContext = unknown>(callback: callbackFn)`, val: `TransportRequestCallback` }, - { key: `${camelify(api)}, TRequestBody extends ${bodyType} = ${defaultBodyType}, TContext = unknown>(params: RequestParams.${Name}, callback: callbackFn)`, val: `TransportRequestCallback` }, - { key: `${camelify(api)}, TRequestBody extends ${bodyType} = ${defaultBodyType}, TContext = unknown>(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: `TransportRequestCallback` } - ]) - } - return methods - } else { - let methods = [ - { key: `${api}, TContext = unknown>(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: `TransportRequestPromise>` }, - { key: `${api}, TContext = unknown>(callback: callbackFn)`, val: `TransportRequestCallback` }, - { key: `${api}, TContext = unknown>(params: RequestParams.${Name}, callback: callbackFn)`, val: `TransportRequestCallback` }, - { key: `${api}, TContext = unknown>(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: `TransportRequestCallback` } - ] - if (isSnakeCased(api)) { - methods = methods.concat([ - { key: `${camelify(api)}, TContext = unknown>(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: `TransportRequestPromise>` }, - { key: `${camelify(api)}, TContext = unknown>(callback: callbackFn)`, val: `TransportRequestCallback` }, - { key: `${camelify(api)}, TContext = unknown>(params: RequestParams.${Name}, callback: callbackFn)`, val: `TransportRequestCallback` }, - { key: `${camelify(api)}, TContext = unknown>(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: `TransportRequestCallback` } - ]) - } - return methods - } -} - -function hasBody (paths, file) { - const spec = readSpec() - return !!spec[file].body - - function readSpec () { - try { - return require(join(paths[0], file)) - } catch (err) {} - - try { - return require(join(paths[1], file)) - } catch (err) {} - - throw new Error(`Cannot read spec file ${file}`) - } -} - -module.exports = genFactory diff --git a/scripts/utils/generateRequestTypes.js b/scripts/utils/generateRequestTypes.js deleted file mode 100644 index b790d8954..000000000 --- a/scripts/utils/generateRequestTypes.js +++ /dev/null @@ -1,144 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. 
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const semver = require('semver') -const deprecatedParameters = require('./patch.json') -const { ndjsonApi } = require('./generateApis') - -const ndjsonApiKey = ndjsonApi - .map(api => { - return api - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - }) - .map(toPascalCase) - -function generate (version, api) { - const release = semver.valid(version) ? semver.major(version) : version - var types = `// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -import { RequestBody, RequestNDBody } from '../lib/Transport' - -export interface Generic { - method?: string; - ignore?: number | number[]; - filter_path?: string | string[]; - pretty?: boolean; - human?: boolean; - error_trace?: boolean; - source?: string; -} -` - - api.forEach(generateRequestType) - return types - - function generateRequestType (spec) { - const api = Object.keys(spec)[0] - const name = api - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - const { paths = {} } = spec[api].url - const { body, params = {} } = spec[api] - - // get the required parts from the url - // if the url has at least one static path, - // then there are not required parts of the url - var allParts = [] - var requiredParts = [] - for (const path of paths) { - if (path.parts) { - allParts.push(Object.keys(path.parts)) - } else { - allParts = [] - break - } - } - if (allParts.length > 0) { - requiredParts = intersect(...allParts) - } - - const parts = paths.reduce((acc, path) => { - if (!path.parts) return acc - for (const part in path.parts) { - if (acc[part] != null) continue - acc[part] = { key: part, value: path.parts[part], required: requiredParts.includes(part) } - } - return acc - }, {}) - const deprecatedParametersToAdd = [] - const paramsArr = Object.keys(params) - .filter(k => !Object.keys(parts).includes(k)) - .map(k => { - if (deprecatedParameters[release] && deprecatedParameters[release][k]) { - deprecatedParametersToAdd.push({ - key: deprecatedParameters[release][k], - value: params[k], - required: params[k].required - }) - } - return { key: k, value: params[k], required: params[k].required } - }) - - const partsArr = Object.keys(parts).map(k => parts[k]) - deprecatedParametersToAdd.forEach(k => partsArr.push(k)) - - const genLine = e => { - const optional = e.required ? '' : '?' - return `${e.key}${optional}: ${getType(e.value.type, e.value.options)};` - } - - const bodyGeneric = ndjsonApiKey.includes(toPascalCase(name)) ? 'RequestNDBody' : 'RequestBody' - - const code = ` -export interface ${toPascalCase(name)}${body ? `` : ''} extends Generic { - ${partsArr.map(genLine).join('\n ')} - ${paramsArr.map(genLine).join('\n ')} - ${body ? `body${body.required ? 
'' : '?'}: T;` : ''} -} -` - - types += '\n' - // remove empty lines - types += code.replace(/^\s*\n/gm, '') - } - - function getType (type, options) { - switch (type) { - case 'list': - return 'string | string[]' - case 'date': - case 'time': - case 'timeout': - return 'string' - case 'enum': - return options.map(k => `'${k}'`).join(' | ') - case 'int': - case 'double': - case 'long': - return 'number' - case 'boolean|long': - return 'boolean | number' - default: - return type - } - } -} - -function intersect (first, ...rest) { - return rest.reduce((accum, current) => { - return accum.filter(x => current.indexOf(x) !== -1) - }, first) -} - -function toPascalCase (str) { - return str[0].toUpperCase() + str.slice(1) -} - -module.exports = generate diff --git a/scripts/utils/index.js b/scripts/utils/index.js deleted file mode 100644 index bdfa4956b..000000000 --- a/scripts/utils/index.js +++ /dev/null @@ -1,19 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const generate = require('./generateApis') -const generateRequestTypes = require('./generateRequestTypes') -const cloneAndCheckout = require('./clone-es') -const genFactory = require('./generateMain') -const generateDocs = require('./generateDocs') - -module.exports = { - generate, - cloneAndCheckout, - genFactory, - generateRequestTypes, - generateDocs -} diff --git a/scripts/utils/patch.json b/scripts/utils/patch.json deleted file mode 100644 index 392b305cb..000000000 --- a/scripts/utils/patch.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "6": { - "_source_includes": "_source_include", - "_source_excludes": "_source_exclude" - }, - "7": { - "_source_includes": "_source_include", - "_source_excludes": "_source_exclude" - }, - "master": { - "_source_includes": "_source_include", - "_source_excludes": "_source_exclude" - } -} diff --git a/scripts/wait-cluster.sh b/scripts/wait-cluster.sh deleted file mode 100755 index 4cacaa4b6..000000000 --- a/scripts/wait-cluster.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -TEST_ES_SERVER=${TEST_ES_SERVER:-"http://localhost:9200/"} - -attempt_counter=0 -max_attempts=5 -url="${TEST_ES_SERVER}/_cluster/health?wait_for_status=green&timeout=50s" - -echo "Waiting for Elasticsearch..." -while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' --max-time 55 "$url")" != "200" ]]; do - if [ ${attempt_counter} -eq ${max_attempts} ];then - echo "\nCouldn't connect to Elasticsearch" - exit 1 - fi - - printf '.' - attempt_counter=$(($attempt_counter+1)) - sleep 5 -done - -echo "\nReady" diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts new file mode 100644 index 000000000..622a16fb8 --- /dev/null +++ b/src/api/api/async_search.ts @@ -0,0 +1,451 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
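+
+// Usage sketch (editorial illustration, not generator output): assuming a
+// configured `client` instance from this package, the async search lifecycle
+// maps onto the four methods below; `my-index` is a placeholder index name.
+//
+//   const submitted = await client.asyncSearch.submit({
+//     index: 'my-index',
+//     query: { match_all: {} },
+//     wait_for_completion_timeout: '1s',
+//     keep_on_completion: true
+//   })
+//   // `id` is only present while results are stored; guard before polling
+//   if (submitted.id != null) {
+//     const status = await client.asyncSearch.status({ id: submitted.id })
+//     if (!status.is_running) {
+//       const result = await client.asyncSearch.get({ id: submitted.id })
+//       await client.asyncSearch.delete({ id: submitted.id })
+//     }
+//   }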
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class AsyncSearch { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'async_search.delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'async_search.get': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive', + 'typed_keys', + 'wait_for_completion_timeout' + ] + }, + 'async_search.status': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive' + ] + }, + 'async_search.submit': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion', + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'project_routing', + 'request_cache', + 'routing', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort' + ] + } + } + } + + /** + * Delete an async search. If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} + */ + async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['async_search.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_async_search/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'async_search.delete', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get async search results. Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} + */ + async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> + async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['async_search.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_async_search/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'async_search.get', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'keep_alive', + 'typed_keys', + 'wait_for_completion_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to: * The user or API key that submitted the original async search request. * Users that have the `monitor` cluster privilege or greater privileges. 
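+ *
+ * A minimal call sketch (editorial illustration; the id value is a placeholder):
+ *
+ * ```ts
+ * const status = await client.asyncSearch.status({ id: '<async-search-id>' })
+ * if (status.is_running) console.log('search still running')
+ * ```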
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} + */ + async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise + async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['async_search.status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_async_search/status/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'async_search.status', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'keep_alive' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run an async search. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} + */ + async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> + async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['async_search.submit'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { // eslint-disable-line + querystring[key] = params[key] + } else { + // @ts-expect-error + body[key] = params[key] + } + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/_async_search` + } else { + method = 'POST' + path = '/_async_search' + } + const meta: TransportRequestMetadata = { + name: 'async_search.submit', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats', + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion', + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'project_routing', + 'request_cache', + 'routing', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts new file mode 100644 index 000000000..dfbe9df2c --- /dev/null +++ b/src/api/api/autoscaling.ts @@ -0,0 +1,277 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
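+
+// Usage sketch (editorial illustration, not generator output): assuming a
+// configured `client`, the policy lifecycle maps onto the methods below;
+// `my_policy` and the policy body are placeholders.
+//
+//   await client.autoscaling.putAutoscalingPolicy({
+//     name: 'my_policy',
+//     policy: { roles: ['data_hot'], deciders: {} }
+//   })
+//   const capacity = await client.autoscaling.getAutoscalingCapacity()
+//   await client.autoscaling.deleteAutoscalingPolicy({ name: 'my_policy' })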
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Autoscaling { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'autoscaling.delete_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'autoscaling.get_autoscaling_capacity': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.get_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.put_autoscaling_policy': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } + } + + /** + * Delete an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy | Elasticsearch API documentation} + */ + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise + async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['autoscaling.delete_autoscaling_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_autoscaling/policy/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'autoscaling.delete_autoscaling_policy', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the autoscaling capacity. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. 
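+ *
+ * A call sketch (editorial illustration), whose response carries the fields discussed next:
+ *
+ * ```ts
+ * const capacity = await client.autoscaling.getAutoscalingCapacity()
+ * console.log(capacity.policies)
+ * ```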
The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity | Elasticsearch API documentation} + */ + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise + async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['autoscaling.get_autoscaling_capacity'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_autoscaling/capacity' + const meta: TransportRequestMetadata = { + name: 'autoscaling.get_autoscaling_capacity', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity | Elasticsearch API documentation} + */ + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise + async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['autoscaling.get_autoscaling_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_autoscaling/policy/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'autoscaling.get_autoscaling_policy', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy | Elasticsearch API documentation} + */ + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise + async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['autoscaling.put_autoscaling_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_autoscaling/policy/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'autoscaling.put_autoscaling_policy', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'policy', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts new file mode 100644 index 000000000..06cab1229 --- /dev/null +++ b/src/api/api/bulk.ts @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. 
Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + bulk: { + path: [ + 'index' + ], + body: [ + 'operations' + ], + query: [ + 'include_source_on_error', + 'list_executed_pipelines', + 'pipeline', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'timeout', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] + } +} + +/** + * Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. * To use the `index` action, you must have the `create`, `index`, or `write` index privilege. * To use the `delete` action, you must have the `delete` or `write` index privilege. * To use the `update` action, you must have the `index` or `write` index privilege. * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\n optional_source\n action_and_meta_data\n optional_source\n .... action_and_meta_data\n optional_source\n ``` The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target. An `index` action adds or replaces a document as necessary. NOTE: Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. NOTE: The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. A note on the format: the idea here is to make processing as fast as possible.
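+ *
+ * Through this client, the alternating action and source lines described above are passed as the `operations` array (editorial sketch; `my-index` is a placeholder):
+ *
+ * ```ts
+ * const res = await client.bulk({
+ *   operations: [
+ *     { index: { _index: 'my-index', _id: '1' } },
+ *     { field1: 'value1' },
+ *     { delete: { _index: 'my-index', _id: '2' } }
+ *   ]
+ * })
+ * if (res.errors) console.log('some bulk items failed')
+ * ```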
As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible. There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default, so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. **Client support for bulk requests** Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: * Go: Check out `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` * PHP: Check out bulk indexing. * Ruby: Check out `Elasticsearch::Helpers::BulkHelper` **Submitting bulk requests with cURL** If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} ``` **Optimistic concurrency control** Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. **Versioning** Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also supports the `version_type`. **Routing** Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Wait for active shards** When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. **Refresh** Control when the changes made by this request are visible to search. NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.
The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. You might want to disable the refresh interval temporarily to improve indexing throughput for large bulk requests. Refer to the linked documentation for step-by-step instructions using the index settings API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk | Elasticsearch API documentation} + */ +export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise +export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.bulk + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/_bulk` + } else { + method = 'POST' + path = '/_bulk' + } + const meta: TransportRequestMetadata = { + name: 'bulk', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'operations', + 'include_source_on_error', + 'list_executed_pipelines', + 'pipeline', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'timeout', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) +} diff --git a/src/api/api/capabilities.ts b/src/api/api/capabilities.ts new file mode 100644 index 000000000..894fae876 --- /dev/null +++ b/src/api/api/capabilities.ts @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
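+
+// Usage sketch (editorial illustration, not generator output): the
+// capabilities spec above accepts no path, body, or query parameters, so a
+// bare call on a configured `client` suffices:
+//
+//   const caps = await client.capabilities()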
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + capabilities: { + path: [], + body: [], + query: [] + } +} + +/** + * Checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported + * @see {@link https://github.com/elastic/elasticsearch/blob/main/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc#require-or-skip-api-capabilities | Elasticsearch API documentation} + */ +export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> +export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise +export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.capabilities + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_capabilities' + const meta: TransportRequestMetadata = { + name: 'capabilities', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts new file mode 100644 index 000000000..eef10f6c4 --- /dev/null +++ b/src/api/api/cat.ts @@ -0,0 +1,1867 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
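+
+// Usage sketch (editorial illustration, not generator output): CAT APIs
+// return human-readable text tables; the `h` and `s` query parameters
+// accepted below select and sort columns. Assuming a configured `client`:
+//
+//   const table = await client.cat.indices({ h: 'index,docs.count', s: 'index' })
+//   console.log(table)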
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +export default class Cat { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'cat.aliases': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'expand_wildcards', + 'master_timeout' + ] + }, + 'cat.allocation': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.circuit_breaker': { + path: [ + 'circuit_breaker_patterns' + ], + body: [], + query: [] + }, + 'cat.component_templates': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.count': { + path: [ + 'index' + ], + body: [], + query: [ + 'h', + 'project_routing', + 's' + ] + }, + 'cat.fielddata': { + path: [ + 'fields' + ], + body: [], + query: [ + 'fields', + 'h', + 's' + ] + }, + 'cat.health': { + path: [], + body: [], + query: [ + 'ts', + 'h', + 's' + ] + }, + 'cat.help': { + path: [], + body: [], + query: [] + }, + 'cat.indices': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'health', + 'include_unloaded_segments', + 'pri', + 'master_timeout', + 'h', + 's' + ] + }, + 'cat.master': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.ml_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'h', + 's' + ] + }, + 'cat.ml_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'h', + 's' + ] + }, + 'cat.ml_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'h', + 's' + ] + }, + 'cat.ml_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'h', + 's', + 'from', + 'size' + ] + }, + 'cat.nodeattrs': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.nodes': { + path: [], + body: [], + query: [ + 'full_id', + 'include_unloaded_segments', + 'h', + 's', + 'master_timeout' + ] + }, + 'cat.pending_tasks': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.plugins': { + path: [], + body: [], + query: [ + 'h', + 's', + 'include_bootstrap', + 'local', + 'master_timeout' + ] + }, + 'cat.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'detailed', + 'index', + 'h', + 's' + ] + }, + 'cat.repositories': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout', + 'expand_wildcards', + 'allow_no_indices', + 'ignore_throttled', + 'ignore_unavailable', + 'allow_closed' + ] + }, + 'cat.shards': { + path: [ + 'index' + ], + body: [], + query: [ + 'h', + 's', + 'master_timeout' + ] + }, + 'cat.snapshots': { + path: [ + 'repository' + ], + body: [], + query: [ + 'ignore_unavailable', + 'h', + 's', + 'master_timeout' + ] + }, + 'cat.tasks': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'nodes', + 'parent_task_id', + 'h', + 's', + 'timeout', + 'wait_for_completion' + ] + }, + 'cat.templates': { + 
path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.thread_pool': { + path: [ + 'thread_pool_patterns' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.transforms': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'h', + 's', + 'size' + ] + } + } + } + + /** + * Get aliases. Get the cluster's index aliases, including filter and routing information. This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases | Elasticsearch API documentation} + */ + async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise + async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.aliases'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_cat/aliases/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_cat/aliases' + } + const meta: TransportRequestMetadata = { + name: 'cat.aliases', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'h', + 's', + 'expand_wildcards', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get shard allocation information. Get a snapshot of the number of shards allocated to each data node and their disk space. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation | Elasticsearch API documentation} + */ + async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise + async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.allocation'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null) { + method = 'GET' + path = `/_cat/allocation/${encodeURIComponent(params.node_id.toString())}` + } else { + method = 'GET' + path = '/_cat/allocation' + } + const meta: TransportRequestMetadata = { + name: 'cat.allocation', + pathParts: { + node_id: params.node_id + }, + acceptedParams: [ + 'node_id', + 'h', + 's', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get circuit breakers statistics + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch#TODO | Elasticsearch API documentation} + */ + async circuitBreaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async circuitBreaker (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async circuitBreaker (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async circuitBreaker (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.circuit_breaker'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.circuit_breaker_patterns != null) { + method = 'GET' + path = `/_cat/circuit_breaker/${encodeURIComponent(params.circuit_breaker_patterns.toString())}` + } else { + method = 'GET' + path = '/_cat/circuit_breaker' + } + const meta: TransportRequestMetadata = { + name: 'cat.circuit_breaker', + pathParts: { + circuit_breaker_patterns: params.circuit_breaker_patterns + }, + acceptedParams: [ + 'circuit_breaker_patterns' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get component templates. Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. 
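+ *
+ * A call sketch (editorial illustration; the name pattern is a placeholder):
+ *
+ * ```ts
+ * const out = await client.cat.componentTemplates({ name: 'my-template*' })
+ * ```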
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates | Elasticsearch API documentation} + */ + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise + async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.component_templates'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_cat/component_templates/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_cat/component_templates' + } + const meta: TransportRequestMetadata = { + name: 'cat.component_templates', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'h', + 's', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a document count. Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count | Elasticsearch API documentation} + */ + async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> + async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise + async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.count'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/count/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/count' + } + const meta: TransportRequestMetadata = { + name: 'cat.count', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'h', + 'project_routing', + 's' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get field data cache information. Get the amount of heap memory currently used by the field data cache on every data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata | Elasticsearch API documentation} + */ + async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise + async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.fielddata'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.fields != null) { + method = 'GET' + path = `/_cat/fielddata/${encodeURIComponent(params.fields.toString())}` + } else { + method = 'GET' + path = '/_cat/fielddata' + } + const meta: TransportRequestMetadata = { + name: 'cat.fielddata', + pathParts: { + fields: params.fields + }, + acceptedParams: [ + 'fields', + 'fields', + 'h', + 's' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the cluster health status. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. 
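+ *
+ * A call sketch (editorial illustration) selecting the machine-sortable timestamp column alongside health columns:
+ *
+ * ```ts
+ * const out = await client.cat.health({ ts: true, h: 'epoch,status,node.total' })
+ * ```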
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health | Elasticsearch API documentation} + */ + async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> + async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise + async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.health'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/health' + const meta: TransportRequestMetadata = { + name: 'cat.health', + acceptedParams: [ + 'ts', + 'h', + 's' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get CAT help. Get help for the CAT APIs. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat | Elasticsearch API documentation} + */ + async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> + async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise + async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.help'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat' + const meta: TransportRequestMetadata = { + name: 'cat.help', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get index information. Get high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. 
CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices | Elasticsearch API documentation} + */ + async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise + async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.indices'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/indices/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/indices' + } + const meta: TransportRequestMetadata = { + name: 'cat.indices', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'expand_wildcards', + 'health', + 'include_unloaded_segments', + 'pri', + 'master_timeout', + 'h', + 's' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get master node information. Get information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master | Elasticsearch API documentation} + */ + async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise + async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.master'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/master' + const meta: TransportRequestMetadata = { + name: 'cat.master', + acceptedParams: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get data frame analytics jobs. Get configuration and usage information about data frame analytics jobs. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics | Elasticsearch API documentation} + */ + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.ml_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_cat/ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_cat/ml/data_frame/analytics' + } + const meta: TransportRequestMetadata = { + name: 'cat.ml_data_frame_analytics', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'allow_no_match', + 'h', + 's' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get datafeeds. Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. 
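+ *
+ * A minimal sketch, assuming `client` is a configured `Client` instance; `'datafeed-*'` is a placeholder ID pattern:
+ *
+ * ```ts
+ * // list matching datafeeds without failing when no datafeed matches the pattern
+ * const feeds = await client.cat.mlDatafeeds({ datafeed_id: 'datafeed-*', allow_no_match: true })
+ * ```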
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds | Elasticsearch API documentation} + */ + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise + async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.ml_datafeeds'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.datafeed_id != null) { + method = 'GET' + path = `/_cat/ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}` + } else { + method = 'GET' + path = '/_cat/ml/datafeeds' + } + const meta: TransportRequestMetadata = { + name: 'cat.ml_datafeeds', + pathParts: { + datafeed_id: params.datafeed_id + }, + acceptedParams: [ + 'datafeed_id', + 'allow_no_match', + 'h', + 's' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get anomaly detection jobs. Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs | Elasticsearch API documentation} + */ + async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise + async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.ml_jobs'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.job_id != null) { + method = 'GET' + path = `/_cat/ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}` + } else { + method = 'GET' + path = '/_cat/ml/anomaly_detectors' + } + const meta: TransportRequestMetadata = { + name: 'cat.ml_jobs', + pathParts: { + job_id: params.job_id + }, + acceptedParams: [ + 'job_id', + 'allow_no_match', + 'h', + 's' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get trained models. Get configuration and usage information about inference trained models. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models | Elasticsearch API documentation} + */ + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise + async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.ml_trained_models'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.model_id != null) { + method = 'GET' + path = `/_cat/ml/trained_models/${encodeURIComponent(params.model_id.toString())}` + } else { + method = 'GET' + path = '/_cat/ml/trained_models' + } + const meta: TransportRequestMetadata = { + name: 'cat.ml_trained_models', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id', + 'allow_no_match', + 'h', + 's', + 'from', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get node attribute information. Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
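+ *
+ * A minimal sketch, assuming `client` is a configured `Client` instance:
+ *
+ * ```ts
+ * // show the node name plus each custom attribute and its value
+ * const attrs = await client.cat.nodeattrs({ h: ['node', 'attr', 'value'] })
+ * ```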
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs | Elasticsearch API documentation} + */ + async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise + async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.nodeattrs'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/nodeattrs' + const meta: TransportRequestMetadata = { + name: 'cat.nodeattrs', + acceptedParams: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get node information. Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes | Elasticsearch API documentation} + */ + async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise + async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.nodes'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/nodes' + const meta: TransportRequestMetadata = { + name: 'cat.nodes', + acceptedParams: [ + 'full_id', + 'include_unloaded_segments', + 'h', + 's', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get pending task information. Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the pending cluster tasks API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks | Elasticsearch API documentation} + */ + async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise + async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.pending_tasks'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/pending_tasks' + const meta: TransportRequestMetadata = { + name: 'cat.pending_tasks', + acceptedParams: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get plugin information. Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins | Elasticsearch API documentation} + */ + async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise + async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.plugins'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/plugins' + const meta: TransportRequestMetadata = { + name: 'cat.plugins', + acceptedParams: [ + 'h', + 's', + 'include_bootstrap', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get shard recovery information. Get information about ongoing and completed shard recoveries. 
Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery | Elasticsearch API documentation} + */ + async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise + async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.recovery'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/recovery/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/recovery' + } + const meta: TransportRequestMetadata = { + name: 'cat.recovery', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'active_only', + 'detailed', + 'index', + 'h', + 's' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get snapshot repository information. Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories | Elasticsearch API documentation} + */ + async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise + async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.repositories'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/repositories' + const meta: TransportRequestMetadata = { + name: 'cat.repositories', + acceptedParams: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get segment information. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments | Elasticsearch API documentation} + */ + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise + async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.segments'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/segments/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/segments' + } + const meta: TransportRequestMetadata = { + name: 'cat.segments', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'h', + 's', + 'local', + 'master_timeout', + 'expand_wildcards', + 'allow_no_indices', + 'ignore_throttled', + 'ignore_unavailable', + 'allow_closed' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get shard information. Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
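+ *
+ * A minimal sketch, assuming `client` is a configured `Client` instance; `'my-index-*'` is a placeholder index pattern:
+ *
+ * ```ts
+ * // list shard placement and state for all indices matching the pattern
+ * const shards = await client.cat.shards({ index: 'my-index-*', h: ['index', 'shard', 'prirep', 'state'] })
+ * ```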
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards | Elasticsearch API documentation} + */ + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise + async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.shards'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cat/shards/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cat/shards' + } + const meta: TransportRequestMetadata = { + name: 'cat.shards', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'h', + 's', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get snapshot information. Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots | Elasticsearch API documentation} + */ + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise + async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.snapshots'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.repository != null) { + method = 'GET' + path = `/_cat/snapshots/${encodeURIComponent(params.repository.toString())}` + } else { + method = 'GET' + path = '/_cat/snapshots' + } + const meta: TransportRequestMetadata = { + name: 'cat.snapshots', + pathParts: { + repository: params.repository + }, + acceptedParams: [ + 'repository', + 'ignore_unavailable', + 'h', + 's', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get task information. Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks | Elasticsearch API documentation} + */ + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise + async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.tasks'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cat/tasks' + const meta: TransportRequestMetadata = { + name: 'cat.tasks', + acceptedParams: [ + 'actions', + 'detailed', + 'nodes', + 'parent_task_id', + 'h', + 's', + 'timeout', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get index template information. Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. 
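+ *
+ * A minimal sketch, assuming `client` is a configured `Client` instance; `'my-template-*'` is a placeholder template name pattern:
+ *
+ * ```ts
+ * // list index templates whose names match the pattern
+ * const templates = await client.cat.templates({ name: 'my-template-*' })
+ * ```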
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates | Elasticsearch API documentation} + */ + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise + async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.templates'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_cat/templates/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_cat/templates' + } + const meta: TransportRequestMetadata = { + name: 'cat.templates', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'h', + 's', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get thread pool statistics. Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool | Elasticsearch API documentation} + */ + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise + async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.thread_pool'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.thread_pool_patterns != null) { + method = 'GET' + path = `/_cat/thread_pool/${encodeURIComponent(params.thread_pool_patterns.toString())}` + } else { + method = 'GET' + path = '/_cat/thread_pool' + } + const meta: TransportRequestMetadata = { + name: 'cat.thread_pool', + pathParts: { + thread_pool_patterns: params.thread_pool_patterns + }, + acceptedParams: [ + 'thread_pool_patterns', + 'h', + 's', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms | Elasticsearch API documentation} + */ + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise + async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cat.transforms'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.transform_id != null) { + method = 'GET' + path = `/_cat/transforms/${encodeURIComponent(params.transform_id.toString())}` + } else { + method = 'GET' + path = '/_cat/transforms' + } + const meta: TransportRequestMetadata = { + name: 'cat.transforms', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'allow_no_match', + 'from', + 'h', + 's', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts new file mode 100644 index 000000000..728f0077e --- /dev/null +++ b/src/api/api/ccr.ts @@ -0,0 +1,940 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
+
+import {
+  Transport,
+  TransportRequestMetadata,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import { kAcceptedParams } from '../../client'
+
+interface That {
+  transport: Transport
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+export default class Ccr {
+  transport: Transport
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>
+  constructor (transport: Transport) {
+    this.transport = transport
+    this[kAcceptedParams] = {
+      'ccr.delete_auto_follow_pattern': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'ccr.follow': {
+        path: [
+          'index'
+        ],
+        body: [
+          'data_stream_name',
+          'leader_index',
+          'max_outstanding_read_requests',
+          'max_outstanding_write_requests',
+          'max_read_request_operation_count',
+          'max_read_request_size',
+          'max_retry_delay',
+          'max_write_buffer_count',
+          'max_write_buffer_size',
+          'max_write_request_operation_count',
+          'max_write_request_size',
+          'read_poll_timeout',
+          'remote_cluster',
+          'settings'
+        ],
+        query: [
+          'master_timeout',
+          'wait_for_active_shards'
+        ]
+      },
+      'ccr.follow_info': {
+        path: [
+          'index'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'ccr.follow_stats': {
+        path: [
+          'index'
+        ],
+        body: [],
+        query: [
+          'timeout'
+        ]
+      },
+      'ccr.forget_follower': {
+        path: [
+          'index'
+        ],
+        body: [
+          'follower_cluster',
+          'follower_index',
+          'follower_index_uuid',
+          'leader_remote_cluster'
+        ],
+        query: [
+          'timeout'
+        ]
+      },
+      'ccr.get_auto_follow_pattern': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'ccr.pause_auto_follow_pattern': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'ccr.pause_follow': {
+        path: [
+          'index'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'ccr.put_auto_follow_pattern': {
+        path: [
+          'name'
+        ],
+        body: [
+          'remote_cluster',
+          'follow_index_pattern',
+          'leader_index_patterns',
+          'leader_index_exclusion_patterns',
+          'max_outstanding_read_requests',
+          'settings',
+          'max_outstanding_write_requests',
+          'read_poll_timeout',
+          'max_read_request_operation_count',
+          'max_read_request_size',
+          'max_retry_delay',
+          'max_write_buffer_count',
+          'max_write_buffer_size',
+          'max_write_request_operation_count',
+          'max_write_request_size'
+        ],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'ccr.resume_auto_follow_pattern': {
+        path: [
+          'name'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'ccr.resume_follow': {
+        path: [
+          'index'
+        ],
+        body: [
+          'max_outstanding_read_requests',
+          'max_outstanding_write_requests',
+          'max_read_request_operation_count',
+          'max_read_request_size',
+          'max_retry_delay',
+          'max_write_buffer_count',
+          'max_write_buffer_size',
+          'max_write_request_operation_count',
+          'max_write_request_size',
+          'read_poll_timeout'
+        ],
+        query: [
+          'master_timeout'
+        ]
+      },
+      'ccr.stats': {
+        path: [],
+        body: [],
+        query: [
+          'master_timeout',
+          'timeout'
+        ]
+      },
+      'ccr.unfollow': {
+        path: [
+          'index'
+        ],
+        body: [],
+        query: [
+          'master_timeout'
+        ]
+      }
+    }
+  }
+
+  /**
+   * Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns.
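+   *
+   * A minimal usage sketch, assuming `client` is a configured `Client` instance from `@elastic/elasticsearch` and `'my-auto-follow-pattern'` is a placeholder pattern name:
+   *
+   * ```ts
+   * // remove the auto-follow pattern by name; resolves to an acknowledged response
+   * await client.ccr.deleteAutoFollowPattern({ name: 'my-auto-follow-pattern' })
+   * ```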
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern | Elasticsearch API documentation} + */ + async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ccr.delete_auto_follow_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'ccr.delete_auto_follow_pattern', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow | Elasticsearch API documentation} + */ + async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise + async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ccr.follow'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}/_ccr/follow` + const meta: TransportRequestMetadata = { + name: 'ccr.follow', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'data_stream_name', + 'leader_index', + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout', + 'remote_cluster', + 'settings', + 'master_timeout', + 'wait_for_active_shards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get follower information. Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info | Elasticsearch API documentation} + */ + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise + async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ccr.follow_info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_ccr/info` + const meta: TransportRequestMetadata = { + name: 'ccr.follow_info', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. 
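+ *
+ * A minimal sketch, assuming `client` is a configured `Client` instance; `'follower-index'` is a placeholder follower index name:
+ *
+ * ```ts
+ * // fetch shard-level replication stats for the follower index
+ * const stats = await client.ccr.followStats({ index: 'follower-index' })
+ * ```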
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats | Elasticsearch API documentation} + */ + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise + async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ccr.follow_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_ccr/stats` + const meta: TransportRequestMetadata = { + name: 'ccr.follow_stats', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so. NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. 
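+ *
+ * A minimal sketch, assuming `client` is a configured `Client` instance pointed at the leader cluster; all names below are placeholders:
+ *
+ * ```ts
+ * // remove the follower's retention leases from the leader index
+ * await client.ccr.forgetFollower({
+ *   index: 'leader-index',
+ *   follower_cluster: 'follower-cluster',
+ *   follower_index: 'follower-index',
+ *   follower_index_uuid: 'follower-index-uuid',
+ *   leader_remote_cluster: 'leader-remote-cluster'
+ * })
+ * ```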
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower | Elasticsearch API documentation} + */ + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise + async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ccr.forget_follower'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_ccr/forget_follower` + const meta: TransportRequestMetadata = { + name: 'ccr.forget_follower', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'follower_cluster', + 'follower_index', + 'follower_index_uuid', + 'leader_remote_cluster', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1 | Elasticsearch API documentation} + */ + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ccr.get_auto_follow_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_ccr/auto_follow' + } + const meta: TransportRequestMetadata = { + name: 'ccr.get_auto_follow_pattern', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern | Elasticsearch API documentation} + */ + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ccr.pause_auto_follow_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}/pause` + const meta: TransportRequestMetadata = { + name: 'ccr.pause_auto_follow_pattern', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Pause a follower. Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. 
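+ *
+ * Example (editorial sketch, not generated code): pausing a follower, changing the
+ * following task configuration, then resuming it. Assumes an instantiated `client`;
+ * the index name is illustrative.
+ *
+ *     await client.ccr.pauseFollow({ index: 'follower-index' })
+ *     // ...adjust the following task configuration...
+ *     await client.ccr.resumeFollow({ index: 'follower-index' })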
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow | Elasticsearch API documentation} + */ + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise + async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ccr.pause_follow'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_ccr/pause_follow` + const meta: TransportRequestMetadata = { + name: 'ccr.pause_follow', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern | Elasticsearch API documentation} + */ + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ccr.put_auto_follow_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'ccr.put_auto_follow_pattern', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'remote_cluster', + 'follow_index_pattern', + 'leader_index_patterns', + 'leader_index_exclusion_patterns', + 'max_outstanding_read_requests', + 'settings', + 'max_outstanding_write_requests', + 'read_poll_timeout', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern | Elasticsearch API documentation} + */ + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise + async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ccr.resume_auto_follow_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ccr/auto_follow/${encodeURIComponent(params.name.toString())}/resume` + const meta: TransportRequestMetadata = { + name: 'ccr.resume_auto_follow_pattern', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. 
Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow | Elasticsearch API documentation} + */ + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise + async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ccr.resume_follow'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_ccr/resume_follow` + const meta: TransportRequestMetadata = { + name: 'ccr.resume_follow', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats | Elasticsearch API documentation} + */ + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ccr.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_ccr/stats' + const meta: TransportRequestMetadata = { + name: 'ccr.stats', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. > info > Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow | Elasticsearch API documentation} + */ + async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise> + async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise + async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ccr.unfollow'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_ccr/unfollow` + const meta: TransportRequestMetadata = { + name: 'ccr.unfollow', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts new file mode 100644 index 000000000..509053aa9 --- /dev/null +++ b/src/api/api/clear_scroll.ts @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
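+
+// Example (editorial sketch, not generated code): with a client instantiated as
+// `new Client({ node: 'http://localhost:9200' })` from '@elastic/elasticsearch',
+// a finished scrolling search is cleaned up with (the scroll ID is illustrative):
+//
+//   await client.clearScroll({ scroll_id: 'my-scroll-id' })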
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + clear_scroll: { + path: [], + body: [ + 'scroll_id' + ], + query: [] + } +} + +/** + * Clear a scrolling search. Clear the search context and results for a scrolling search. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll | Elasticsearch API documentation} + */ +export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise +export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.clear_scroll + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'DELETE' + const path = '/_search/scroll' + const meta: TransportRequestMetadata = { + name: 'clear_scroll', + pathParts: { + scroll_id: params.scroll_id + }, + acceptedParams: [ + 'scroll_id', + 'scroll_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts new file mode 100644 index 000000000..9af0ffe94 --- /dev/null +++ b/src/api/api/close_point_in_time.ts @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
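+
+// Example (editorial sketch, not generated code): a point in time is opened
+// explicitly, referenced by searches, and closed once it is no longer needed.
+// Assumes an instantiated `client`; the index name and keep-alive are illustrative.
+//
+//   const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
+//   // ...run searches that reference pit.id...
+//   await client.closePointInTime({ id: pit.id })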
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + close_point_in_time: { + path: [], + body: [ + 'id' + ], + query: [] + } +} + +/** + * Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time | Elasticsearch API documentation} + */ +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise +export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.close_point_in_time + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'DELETE' + const path = '/_pit' + const meta: TransportRequestMetadata = { + name: 'close_point_in_time', + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts new file mode 100644 index 000000000..27e032432 --- /dev/null +++ b/src/api/api/cluster.ts @@ -0,0 +1,1129 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
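+
+// Editorial sketch (not generated code) of the parameter-routing convention every
+// method below follows for APIs that accept a request body: path keys only build
+// the URL, declared body and query keys go where they are declared, and any other
+// key falls back to the querystring when it is a declared or common query
+// parameter, otherwise to the body. Bodyless APIs send every non-path key to the
+// querystring. The helper name is ours, not part of the client.
+//
+//   const common = ['error_trace', 'filter_path', 'human', 'pretty']
+//   function routeParams (params: Record<string, any>, accepted: { path: string[], body: string[], query: string[] }): { body: Record<string, any>, querystring: Record<string, any> } {
+//     const body: Record<string, any> = {}
+//     const querystring: Record<string, any> = {}
+//     for (const key in params) {
+//       if (accepted.path.includes(key)) continue // consumed by the URL template
+//       if (accepted.body.includes(key)) body[key] = params[key]
+//       else if (accepted.query.includes(key) || common.includes(key)) querystring[key] = params[key]
+//       else body[key] = params[key] // unknown keys default to the body
+//     }
+//     return { body, querystring }
+//   }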
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Cluster { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'cluster.allocation_explain': { + path: [], + body: [ + 'index', + 'shard', + 'primary', + 'current_node' + ], + query: [ + 'index', + 'shard', + 'primary', + 'current_node', + 'include_disk_info', + 'include_yes_decisions', + 'master_timeout' + ] + }, + 'cluster.delete_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'cluster.delete_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'master_timeout', + 'wait_for_removal' + ] + }, + 'cluster.exists_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'local' + ] + }, + 'cluster.get_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'settings_filter', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'cluster.get_settings': { + path: [], + body: [], + query: [ + 'flat_settings', + 'include_defaults', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.health': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'level', + 'local', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'wait_for_events', + 'wait_for_nodes', + 'wait_for_no_initializing_shards', + 'wait_for_no_relocating_shards', + 'wait_for_status' + ] + }, + 'cluster.info': { + path: [ + 'target' + ], + body: [], + query: [] + }, + 'cluster.pending_tasks': { + path: [], + body: [], + query: [ + 'local', + 'master_timeout' + ] + }, + 'cluster.post_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'node_names', + 'node_ids', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.put_component_template': { + path: [ + 'name' + ], + body: [ + 'template', + 'version', + '_meta', + 'deprecated' + ], + query: [ + 'create', + 'cause', + 'master_timeout' + ] + }, + 'cluster.put_settings': { + path: [], + body: [ + 'persistent', + 'transient' + ], + query: [ + 'flat_settings', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.remote_info': { + path: [], + body: [], + query: [] + }, + 'cluster.reroute': { + path: [], + body: [ + 'commands' + ], + query: [ + 'dry_run', + 'explain', + 'metric', + 'retry_failed', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.state': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'wait_for_metadata_version', + 'wait_for_timeout' + ] + }, + 'cluster.stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'include_remotes', + 'timeout' + ] + } + } + } + + /** + * Explain the shard allocations. Get explanations for shard allocations in the cluster. This API accepts the current_node, index, primary and shard parameters in the request body or in query parameters, but not in both at the same time. For unassigned shards, it provides an explanation for why the shard is unassigned. 
For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain | Elasticsearch API documentation} + */ + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise> + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise + async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['cluster.allocation_explain'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_cluster/allocation/explain' + const meta: TransportRequestMetadata = { + name: 'cluster.allocation_explain', + acceptedParams: [ + 'index', + 'shard', + 'primary', + 'current_node', + 'index', + 'shard', + 'primary', + 'current_node', + 'include_disk_info', + 'include_yes_decisions', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
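+ *
+ * Example (editorial sketch, not generated code): deleting a component template by
+ * name. Assumes an instantiated `client`; the template name is illustrative.
+ *
+ *     await client.cluster.deleteComponentTemplate({ name: 'my-settings-template' })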
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} + */ + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise + async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.delete_component_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_component_template/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'cluster.delete_component_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation} + */ + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise + async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.delete_voting_config_exclusions'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = '/_cluster/voting_config_exclusions' + const meta: TransportRequestMetadata = { + name: 'cluster.delete_voting_config_exclusions', + acceptedParams: [ + 'master_timeout', + 'wait_for_removal' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Check component templates. Returns information about whether a particular component template exists. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} + */ + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise + async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.exists_component_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'HEAD' + const path = `/_component_template/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'cluster.exists_component_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'local' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get component templates. Get information about component templates. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} + */ + async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise + async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.get_component_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_component_template/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_component_template' + } + const meta: TransportRequestMetadata = { + name: 'cluster.get_component_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'flat_settings', + 'settings_filter', + 'include_defaults', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings | Elasticsearch API documentation} + */ + async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.get_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cluster/settings' + const meta: TransportRequestMetadata = { + name: 'cluster.get_settings', + acceptedParams: [ + 'flat_settings', + 'include_defaults', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status. One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status. 
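+ *
+ * Example (editorial sketch, not generated code): blocking until the cluster reaches
+ * at least yellow health. Assumes an instantiated `client`; the timeout is illustrative.
+ *
+ *     const health = await client.cluster.health({ wait_for_status: 'yellow', timeout: '30s' })
+ *     console.log(health.status)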
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health | Elasticsearch API documentation} + */ + async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> + async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise + async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.health'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/_cluster/health/${encodeURIComponent(params.index.toString())}` + } else { + method = 'GET' + path = '/_cluster/health' + } + const meta: TransportRequestMetadata = { + name: 'cluster.health', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'expand_wildcards', + 'level', + 'local', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'wait_for_events', + 'wait_for_nodes', + 'wait_for_no_initializing_shards', + 'wait_for_no_relocating_shards', + 'wait_for_status' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get cluster info. Returns basic information about the cluster. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info | Elasticsearch API documentation} + */ + async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise + async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_info/${encodeURIComponent(params.target.toString())}` + const meta: TransportRequestMetadata = { + name: 'cluster.info', + pathParts: { + target: params.target + }, + acceptedParams: [ + 'target' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the pending cluster tasks. 
Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API, which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task management API and the pending cluster tasks API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks | Elasticsearch API documentation} + */ + async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterPendingTasksResponse> + async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterPendingTasksResponse, unknown>> + async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<T.ClusterPendingTasksResponse> + async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.pending_tasks'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_cluster/pending_tasks' + const meta: TransportRequestMetadata = { + name: 'cluster.pending_tasks', + acceptedParams: [ + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster.
A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation} + */ + async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise + async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.post_voting_config_exclusions'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_cluster/voting_config_exclusions' + const meta: TransportRequestMetadata = { + name: 'cluster.post_voting_config_exclusions', + acceptedParams: [ + 'node_names', + 'node_ids', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. 
You can include comments anywhere in the request body except before the opening curly bracket. **Applying component templates** You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template's `composed_of` list. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} + */ + async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise + async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['cluster.put_component_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_component_template/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'cluster.put_component_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'template', + 'version', + '_meta', + 'deprecated', + 'create', + 'cause', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the cluster settings. Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value. If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. 
Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings | Elasticsearch API documentation} + */ + async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise + async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['cluster.put_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = '/_cluster/settings' + const meta: TransportRequestMetadata = { + name: 'cluster.put_settings', + acceptedParams: [ + 'persistent', + 'transient', + 'flat_settings', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get remote cluster information. Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias. > info > This API returns information that reflects current state on the local cluster. > The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. > Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. > To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). 
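+ *
+ * Example (editorial sketch, not generated code): listing configured remote cluster
+ * aliases and whether an open connection to each currently exists. Assumes an
+ * instantiated `client`.
+ *
+ *     const remotes = await client.cluster.remoteInfo()
+ *     for (const [alias, info] of Object.entries(remotes)) {
+ *       console.log(alias, info.connected)
+ *     }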
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info | Elasticsearch API documentation} + */ + async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise + async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.remote_info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_remote/info' + const meta: TransportRequestMetadata = { + name: 'cluster.remote_info', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. 
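+ *
+ * Illustrative usage, not part of the generated client: a hedged sketch assuming a
+ * configured `client`; the node and index names are hypothetical.
+ *
+ * @example
+ * // Explicitly move shard 0 of a hypothetical index between two hypothetical nodes.
+ * await client.cluster.reroute({
+ *   commands: [
+ *     { move: { index: 'my-index', shard: 0, from_node: 'node1', to_node: 'node2' } }
+ *   ]
+ * })
+ * // After fixing an allocation failure, preview a retry round without applying it:
+ * await client.cluster.reroute({ retry_failed: true, dry_run: true })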
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute | Elasticsearch API documentation} + */ + async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise + async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['cluster.reroute'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_cluster/reroute' + const meta: TransportRequestMetadata = { + name: 'cluster.reroute', + acceptedParams: [ + 'commands', + 'dry_run', + 'explain', + 'metric', + 'retry_failed', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the cluster state. Get comprehensive information about the state of the cluster. The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response. By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. WARNING: The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. 
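+ *
+ * Illustrative usage, not part of the generated client: a sketch assuming a configured `client`.
+ *
+ * @example
+ * // Fetch only the metadata portion of the cluster state, served by the local node
+ * // rather than the full (potentially very large) state from the elected master.
+ * const state = await client.cluster.state({ metric: 'metadata', local: true })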
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state | Elasticsearch API documentation} + */ + async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise + async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.state'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.metric != null && params.index != null) { + method = 'GET' + path = `/_cluster/state/${encodeURIComponent(params.metric.toString())}/${encodeURIComponent(params.index.toString())}` + } else if (params.metric != null) { + method = 'GET' + path = `/_cluster/state/${encodeURIComponent(params.metric.toString())}` + } else { + method = 'GET' + path = '/_cluster/state' + } + const meta: TransportRequestMetadata = { + name: 'cluster.state', + pathParts: { + metric: params.metric, + index: params.index + }, + acceptedParams: [ + 'metric', + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'wait_for_metadata_version', + 'wait_for_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats | Elasticsearch API documentation} + */ + async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['cluster.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null) { + method = 'GET' + path = `/_cluster/stats/nodes/${encodeURIComponent(params.node_id.toString())}` + } else { + method = 'GET' + path = '/_cluster/stats' + } + const meta: TransportRequestMetadata = { + name: 'cluster.stats', + pathParts: { + node_id: params.node_id + }, + acceptedParams: [ + 'node_id', + 'include_remotes', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts new file mode 100644 index 000000000..eb3745556 --- /dev/null +++ b/src/api/api/connector.ts @@ -0,0 +1,2290 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Connector { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'connector.check_in': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.delete': { + path: [ + 'connector_id' + ], + body: [], + query: [ + 'delete_sync_jobs', + 'hard' + ] + }, + 'connector.get': { + path: [ + 'connector_id' + ], + body: [], + query: [ + 'include_deleted' + ] + }, + 'connector.last_sync': { + path: [ + 'connector_id' + ], + body: [ + 'last_access_control_sync_error', + 'last_access_control_sync_scheduled_at', + 'last_access_control_sync_status', + 'last_deleted_document_count', + 'last_incremental_sync_scheduled_at', + 'last_indexed_document_count', + 'last_seen', + 'last_sync_error', + 'last_sync_scheduled_at', + 'last_sync_status', + 'last_synced', + 'sync_cursor' + ], + query: [] + }, + 'connector.list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'index_name', + 'connector_name', + 'service_type', + 'include_deleted', + 'query' + ] + }, + 'connector.post': { + path: [], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.put': { + path: [ + 'connector_id' + ], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.secret_delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_get': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_post': { + path: [], + body: [], + query: [] + }, + 'connector.secret_put': { + path: [ 
+ 'id' + ], + body: [], + query: [] + }, + 'connector.sync_job_cancel': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_check_in': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_claim': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'sync_cursor', + 'worker_hostname' + ], + query: [] + }, + 'connector.sync_job_delete': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_error': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.sync_job_get': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'status', + 'connector_id', + 'job_type' + ] + }, + 'connector.sync_job_post': { + path: [], + body: [ + 'id', + 'job_type', + 'trigger_method' + ], + query: [] + }, + 'connector.sync_job_update_stats': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'deleted_document_count', + 'indexed_document_count', + 'indexed_document_volume', + 'last_seen', + 'metadata', + 'total_document_count' + ], + query: [] + }, + 'connector.update_active_filtering': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.update_api_key_id': { + path: [ + 'connector_id' + ], + body: [ + 'api_key_id', + 'api_key_secret_id' + ], + query: [] + }, + 'connector.update_configuration': { + path: [ + 'connector_id' + ], + body: [ + 'configuration', + 'values' + ], + query: [] + }, + 'connector.update_error': { + path: [ + 'connector_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.update_features': { + path: [ + 'connector_id' + ], + body: [ + 'features' + ], + query: [] + }, + 'connector.update_filtering': { + path: [ + 'connector_id' + ], + body: [ + 'filtering', + 'rules', + 'advanced_snippet' + ], + query: [] + }, + 'connector.update_filtering_validation': { + path: [ + 'connector_id' + ], + body: [ + 'validation' + ], + query: [] + }, + 'connector.update_index_name': { + path: [ + 'connector_id' + ], + body: [ + 'index_name' + ], + query: [] + }, + 'connector.update_name': { + path: [ + 'connector_id' + ], + body: [ + 'name', + 'description' + ], + query: [] + }, + 'connector.update_native': { + path: [ + 'connector_id' + ], + body: [ + 'is_native' + ], + query: [] + }, + 'connector.update_pipeline': { + path: [ + 'connector_id' + ], + body: [ + 'pipeline' + ], + query: [] + }, + 'connector.update_scheduling': { + path: [ + 'connector_id' + ], + body: [ + 'scheduling' + ], + query: [] + }, + 'connector.update_service_type': { + path: [ + 'connector_id' + ], + body: [ + 'service_type' + ], + query: [] + }, + 'connector.update_status': { + path: [ + 'connector_id' + ], + body: [ + 'status' + ], + query: [] + } + } + } + + /** + * Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. 
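+ *
+ * Illustrative usage, not part of the generated client: a sketch assuming a configured
+ * `client` exposing this class as `client.connector`; the connector ID is hypothetical.
+ *
+ * @example
+ * await client.connector.checkIn({ connector_id: 'my-connector-id' })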
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in | Elasticsearch API documentation} + */ + async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> + async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise + async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.check_in'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_check_in` + const meta: TransportRequestMetadata = { + name: 'connector.check_in', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete a connector. Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete | Elasticsearch API documentation} + */ + async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.delete', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'delete_sync_jobs', + 'hard' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a connector. Get the details about a connector. 
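+ *
+ * Illustrative usage, not part of the generated client (the connector ID is hypothetical):
+ *
+ * @example
+ * const connector = await client.connector.get({ connector_id: 'my-connector-id' })
+ * console.log(connector.status, connector.index_name)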
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get | Elasticsearch API documentation} + */ + async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.get', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'include_deleted' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector last sync stats. Update the fields related to the last sync of a connector. This action is used for analytics and monitoring. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-last-sync | Elasticsearch API documentation} + */ + async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise> + async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise + async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.last_sync'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_last_sync` + const meta: TransportRequestMetadata = { + name: 'connector.last_sync', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'last_access_control_sync_error', + 'last_access_control_sync_scheduled_at', + 'last_access_control_sync_status', + 'last_deleted_document_count', + 'last_incremental_sync_scheduled_at', + 'last_indexed_document_count', + 'last_seen', + 'last_sync_error', + 'last_sync_scheduled_at', + 'last_sync_status', + 'last_synced', + 'sync_cursor' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get all connectors. Get information about all connectors. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list | Elasticsearch API documentation} + */ + async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise + async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.list'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_connector' + const meta: TransportRequestMetadata = { + name: 'connector.list', + acceptedParams: [ + 'from', + 'size', + 'index_name', + 'connector_name', + 'service_type', + 'include_deleted', + 'query' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a connector. Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. 
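+ *
+ * Illustrative usage, not part of the generated client: a sketch assuming a configured
+ * `client`; the index name and service type are examples.
+ *
+ * @example
+ * const created = await client.connector.post({
+ *   index_name: 'search-google-drive',
+ *   name: 'My GDrive connector',
+ *   service_type: 'google_drive'
+ * })
+ * console.log(created.id)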
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put | Elasticsearch API documentation} + */ + async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise + async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.post'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_connector' + const meta: TransportRequestMetadata = { + name: 'connector.post', + acceptedParams: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a connector. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put | Elasticsearch API documentation} + */ + async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise + async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.put'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.connector_id != null) { + method = 'PUT' + path = `/_connector/${encodeURIComponent(params.connector_id.toString())}` + } else { + method = 'PUT' + path = '/_connector' + } + const meta: TransportRequestMetadata = { + name: 'connector.put', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Deletes a connector secret + */ + async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.secret_delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_connector/_secret/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.secret_delete', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves a secret stored by Connectors + */ + async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.secret_get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_connector/_secret/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.secret_get', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates a secret for a Connector + */ + async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.secret_post'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_connector/_secret' + const meta: TransportRequestMetadata = { + name: 'connector.secret_post', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates or updates a secret for a Connector + */ + async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.secret_put'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_secret/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.secret_put', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Cancel a connector sync job. Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. 
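+ *
+ * Illustrative usage, not part of the generated client (the sync job ID is hypothetical):
+ *
+ * @example
+ * await client.connector.syncJobCancel({ connector_sync_job_id: 'my-sync-job-id' })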
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel | Elasticsearch API documentation} + */ + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.sync_job_cancel'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_cancel` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_cancel', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + }, + acceptedParams: [ + 'connector_sync_job_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Check in a connector sync job. Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in | Elasticsearch API documentation} + */ + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise + async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.sync_job_check_in'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_check_in` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_check_in', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + }, + acceptedParams: [ + 'connector_sync_job_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Claim a connector sync job. This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, it can set the `sync_cursor` property for the sync job. This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim | Elasticsearch API documentation} + */ + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise + async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.sync_job_claim'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_claim` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_claim', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + }, + acceptedParams: [ + 'connector_sync_job_id', + 'sync_cursor', + 'worker_hostname' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete a connector sync job. 
Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete | Elasticsearch API documentation} + */ + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.sync_job_delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_delete', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + }, + acceptedParams: [ + 'connector_sync_job_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Set a connector sync job error. Set the `error` field for a connector sync job and set its `status` to `error`. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error | Elasticsearch API documentation} + */ + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise + async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.sync_job_error'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_error` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_error', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + }, + acceptedParams: [ + 'connector_sync_job_id', + 'error' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a connector sync job. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get | Elasticsearch API documentation} + */ + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.sync_job_get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_get', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + }, + acceptedParams: [ + 'connector_sync_job_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list | Elasticsearch API documentation} + */ + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.sync_job_list'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_connector/_sync_job' + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_list', + acceptedParams: [ + 'from', + 'size', + 'status', + 'connector_id', + 'job_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post | Elasticsearch API documentation} + */ + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.sync_job_post'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_connector/_sync_job' + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_post', + acceptedParams: [ + 'id', + 'job_type', + 'trigger_method' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. This API is mainly used by the connector service for updating sync job information. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
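+ *
+ * Illustrative usage, not part of the generated client: a sketch assuming a configured
+ * `client`; the ID and counts are examples.
+ *
+ * @example
+ * await client.connector.syncJobUpdateStats({
+ *   connector_sync_job_id: 'my-sync-job-id',
+ *   deleted_document_count: 0,
+ *   indexed_document_count: 1250,
+ *   indexed_document_volume: 1048576
+ * })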
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats | Elasticsearch API documentation} + */ + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise + async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.sync_job_update_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_stats` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_update_stats', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + }, + acceptedParams: [ + 'connector_sync_job_id', + 'deleted_document_count', + 'indexed_document_count', + 'indexed_document_volume', + 'last_seen', + 'metadata', + 'total_document_count' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Activate the connector draft filter. Activates the valid draft filtering for a connector. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering | Elasticsearch API documentation} + */ + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['connector.update_active_filtering'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_filtering/_activate` + const meta: TransportRequestMetadata = { + name: 'connector.update_active_filtering', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id | Elasticsearch API documentation} + */ + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_api_key_id'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_api_key_id` + const meta: TransportRequestMetadata = { + name: 'connector.update_api_key_id', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'api_key_id', + 'api_key_secret_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector configuration. Update the configuration field in the connector document. 
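+ *
+ * Illustrative usage, not part of the generated client: a sketch assuming a configured
+ * `client`; the configuration keys shown are hypothetical and depend on the connector's
+ * service type.
+ *
+ * @example
+ * await client.connector.updateConfiguration({
+ *   connector_id: 'my-connector-id',
+ *   values: { host: 'https://example.com', port: 5432 }
+ * })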
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration | Elasticsearch API documentation} + */ + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_configuration'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_configuration` + const meta: TransportRequestMetadata = { + name: 'connector.update_configuration', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'configuration', + 'values' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector error field. Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error | Elasticsearch API documentation} + */ + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_error'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_error` + const meta: TransportRequestMetadata = { + name: 'connector.update_error', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'error' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: * document-level security * incremental syncs * advanced sync rules * basic sync rules Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features | Elasticsearch API documentation} + */ + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise + async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_features'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_features` + const meta: TransportRequestMetadata = { + name: 'connector.update_features', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'features' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector filtering. Update the draft filtering configuration of a connector and marks the draft validation state as edited. 
The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering | Elasticsearch API documentation} + */ + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_filtering'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_filtering` + const meta: TransportRequestMetadata = { + name: 'connector.update_filtering', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'filtering', + 'rules', + 'advanced_snippet' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector draft filtering validation. Update the draft filtering validation info for a connector. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering-validation | Elasticsearch API documentation} + */ + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_filtering_validation'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_filtering/_validation` + const meta: TransportRequestMetadata = { + name: 'connector.update_filtering_validation', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'validation' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name | Elasticsearch API documentation} + */ + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_index_name'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_index_name` + const meta: TransportRequestMetadata = { + name: 'connector.update_index_name', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'index_name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector name and description. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name | Elasticsearch API documentation} + */ + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_name'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_name` + const meta: TransportRequestMetadata = { + name: 'connector.update_name', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'name', + 'description' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector is_native flag. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-native | Elasticsearch API documentation} + */ + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_native'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_native` + const meta: TransportRequestMetadata = { + name: 'connector.update_native', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'is_native' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline | Elasticsearch API documentation} + */ + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_pipeline` + const meta: TransportRequestMetadata = { + name: 'connector.update_pipeline', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'pipeline' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector scheduling. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling | Elasticsearch API documentation} + */ + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_scheduling'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_scheduling` + const meta: TransportRequestMetadata = { + name: 'connector.update_scheduling', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'scheduling' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector service type. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type | Elasticsearch API documentation} + */ + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_service_type'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_service_type` + const meta: TransportRequestMetadata = { + name: 'connector.update_service_type', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'service_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the connector status. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status | Elasticsearch API documentation} + */ + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['connector.update_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_status` + const meta: TransportRequestMetadata = { + name: 'connector.update_status', + pathParts: { + connector_id: params.connector_id + }, + acceptedParams: [ + 'connector_id', + 'status' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/count.ts b/src/api/api/count.ts new file mode 100644 index 000000000..e71b731aa --- /dev/null +++ b/src/api/api/count.ts @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + count: { + path: [ + 'index' + ], + body: [ + 'query' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'min_score', + 'preference', + 'project_routing', + 'routing', + 'terminate_after', + 'q' + ] + } +} + +/** + * Count search results. Get the number of documents matching a query. 
The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count | Elasticsearch API documentation} + */ +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise +export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.count + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_count` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_count' + } + const meta: TransportRequestMetadata = { + name: 'count', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'query', + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'min_score', + 'preference', + 'project_routing', + 'routing', + 'terminate_after', + 'q' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/create.ts b/src/api/api/create.ts new file mode 100644 index 000000000..77195d4d2 --- /dev/null +++ b/src/api/api/create.ts @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
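For orientation, here is a minimal usage sketch of the count API defined above, called through the client rather than the generated function directly. The node URL, index name, and field values are hypothetical placeholders, not part of the generated code:

```
import { Client } from '@elastic/elasticsearch'

// Hypothetical local cluster; substitute your own node URL and auth.
const client = new Client({ node: 'http://localhost:9200' })

// With no query, the API counts all documents (implicit match_all).
const all = await client.count({ index: 'my-index' })
console.log(all.count)

// With a Query DSL body, only matching documents are counted.
const published = await client.count({
  index: 'my-index',
  query: { match: { status: 'published' } }
})
console.log(published.count)
```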
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = { + create: { + path: [ + 'id', + 'index' + ], + body: [ + 'document' + ], + query: [ + 'include_source_on_error', + 'pipeline', + 'refresh', + 'require_alias', + 'require_data_stream', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] + } +} + +/** + * Create a new document in the index. You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs. Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with the same ID already exists in the index. To update an existing document, you must use the `/<target>/_doc/` API. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow, or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass.
If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create | Elasticsearch API documentation} + */ +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise +export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.create + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}/_create/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'create', + pathParts: { + id: params.id, + index: params.index + }, + acceptedParams: [ + 'id', + 'index', + 'document', + 'include_source_on_error', + 'pipeline', + 'refresh', + 'require_alias', + 'require_data_stream', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts new file mode 100644 index 000000000..6c914f9be --- /dev/null +++ b/src/api/api/dangling_indices.ts @@ -0,0 +1,213 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
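The create API above is the optimistic "index only if absent" variant described in its doc comment. A minimal usage sketch follows, with the 409 handling the docs call out; the endpoint, index, ID, and document fields are hypothetical:

```
import { Client } from '@elastic/elasticsearch'

// Hypothetical endpoint and index; adjust for your cluster.
const client = new Client({ node: 'http://localhost:9200' })

try {
  // _create indexes the document only if the ID is not already taken.
  await client.create({
    index: 'my-index',
    id: '1',
    document: { title: 'Hello world', published: true }
  })
} catch (err: any) {
  // A 409 response means a document with the same ID already exists.
  if (err?.meta?.statusCode === 409) {
    console.log('Document already exists; use index() or update() instead.')
  } else {
    throw err
  }
}
```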
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +export default class DanglingIndices { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'dangling_indices.delete_dangling_index': { + path: [ + 'index_uuid' + ], + body: [], + query: [ + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] + }, + 'dangling_indices.import_dangling_index': { + path: [ + 'index_uuid' + ], + body: [], + query: [ + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] + }, + 'dangling_indices.list_dangling_indices': { + path: [], + body: [], + query: [] + } + } + } + + /** + * Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index | Elasticsearch API documentation} + */ + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise + async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['dangling_indices.delete_dangling_index'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_dangling/${encodeURIComponent(params.index_uuid.toString())}` + const meta: TransportRequestMetadata = { + name: 'dangling_indices.delete_dangling_index', + pathParts: { + index_uuid: params.index_uuid + }, + acceptedParams: [ + 'index_uuid', + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index | Elasticsearch API documentation} + */ + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise + async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['dangling_indices.import_dangling_index'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_dangling/${encodeURIComponent(params.index_uuid.toString())}` + const meta: TransportRequestMetadata = { + name: 'dangling_indices.import_dangling_index', + pathParts: { + index_uuid: params.index_uuid + }, + acceptedParams: [ + 'index_uuid', + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the dangling indices. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices | Elasticsearch API documentation} + */ + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise + async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['dangling_indices.list_dangling_indices'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_dangling' + const meta: TransportRequestMetadata = { + name: 'dangling_indices.list_dangling_indices', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts new file mode 100644 index 000000000..a8ea798b4 --- /dev/null +++ b/src/api/api/delete.ts @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + delete: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'if_primary_term', + 'if_seq_no', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] + } +} + +/** + * Delete a document. Remove a JSON document from the specified index. NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. **Routing** If routing is used during indexing, the routing value also needs to be specified to delete a document. If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 ``` This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. **Distributed** The delete operation gets hashed into a specific shard ID. 
It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete | Elasticsearch API documentation} + */ +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise +export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.delete + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'delete', + pathParts: { + id: params.id, + index: params.index + }, + acceptedParams: [ + 'id', + 'index', + 'if_primary_term', + 'if_seq_no', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts new file mode 100644 index 000000000..126746671 --- /dev/null +++ b/src/api/api/delete_by_query.ts @@ -0,0 +1,167 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
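To make the routing behavior described in the delete doc comment concrete, here is a usage sketch mirroring the documented `DELETE /my-index-000001/_doc/1?routing=shard-1` example; the node URL is a hypothetical placeholder:

```
import { Client } from '@elastic/elasticsearch'

// Hypothetical endpoint; index, ID, and routing value mirror the
// DELETE /my-index-000001/_doc/1?routing=shard-1 example above.
const client = new Client({ node: 'http://localhost:9200' })

const result = await client.delete({
  index: 'my-index-000001',
  id: '1',
  routing: 'shard-1' // required when the document was indexed with routing
})
console.log(result.result) // 'deleted' when the document was found and removed
```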
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = { + delete_by_query: { + path: [ + 'index' + ], + body: [ + 'max_docs', + 'query', + 'slice', + 'sort' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'preference', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'q', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } +} + +/** + * Delete documents. Deletes documents that match the specified query. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `delete` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential backoff. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick; they are not rolled back. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query. **Throttling delete requests** To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. **Cancel a delete by query operation** Any delete by query can be canceled using the task cancel API. For example: ``` POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel ``` The task ID can be found by using the get tasks API. Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query | Elasticsearch API documentation} + */ +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise +export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.delete_by_query + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_delete_by_query` + const meta: TransportRequestMetadata = { + name: 'delete_by_query', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'max_docs', + 'query', + 'slice', + 'sort', + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'preference', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'q', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts new file mode 100644 index 000000000..fa5183cdc --- /dev/null +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = { + delete_by_query_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} + +/** + * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle | Elasticsearch API documentation} + */ +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.DeleteByQueryRethrottleResponse> +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.DeleteByQueryRethrottleResponse, unknown>> +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<T.DeleteByQueryRethrottleResponse> +export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = acceptedParams.delete_by_query_rethrottle + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_delete_by_query/${encodeURIComponent(params.task_id.toString())}/_rethrottle` + const meta: TransportRequestMetadata = { + name: 'delete_by_query_rethrottle', + pathParts: { + task_id: params.task_id + }, + acceptedParams: [ + 'task_id', + 'requests_per_second' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts new file mode 100644 index 000000000..85c17c0c1 --- /dev/null +++ b/src/api/api/delete_script.ts @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again.
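Before the delete_script implementation that follows, a quick sketch of driving the rethrottle endpoint just shown. The task ID is illustrative; in practice it comes from the `task` field of a `wait_for_completion=false` delete-by-query response.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Placeholder task ID, as returned by an async delete-by-query.
await client.deleteByQueryRethrottle({
  task_id: 'r1A2WoRbTwKZ516z6NEs5A:36619',
  requests_per_second: -1 // disable throttling for the running task
})
```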
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + delete_script: { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } +} + +/** + * Delete a script or search template. Deletes a stored script or search template. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script | Elasticsearch API documentation} + */ +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise +export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.delete_script + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_scripts/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'delete_script', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts new file mode 100644 index 000000000..eb2246366 --- /dev/null +++ b/src/api/api/enrich.ts @@ -0,0 +1,355 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
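Before the enrich namespace below, a usage sketch for the delete_script endpoint above; the script ID is a placeholder and the `timeout` value is one of the accepted query parameters.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Remove a stored script or search template by ID.
await client.deleteScript({ id: 'my-search-template', timeout: '30s' })
```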
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Enrich { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'enrich.delete_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'enrich.execute_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'enrich.get_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'enrich.put_policy': { + path: [ + 'name' + ], + body: [ + 'geo_match', + 'match', + 'range' + ], + query: [ + 'master_timeout' + ] + }, + 'enrich.stats': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } + } + + /** + * Delete an enrich policy. Deletes an existing enrich policy and its enrich index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy | Elasticsearch API documentation} + */ + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise + async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['enrich.delete_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'enrich.delete_policy', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run an enrich policy. Create the enrich index for an existing enrich policy. 
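A minimal sketch of running an existing policy with the method defined below, assuming the policy has already been created; the policy name is illustrative.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Build the enrich index for an existing policy.
await client.enrich.executePolicy({
  name: 'users-policy',
  wait_for_completion: true // block until the enrich index is ready
})
```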
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy | Elasticsearch API documentation} + */ + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise + async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['enrich.execute_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}/_execute` + const meta: TransportRequestMetadata = { + name: 'enrich.execute_policy', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get an enrich policy. Returns information about an enrich policy. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy | Elasticsearch API documentation} + */ + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise + async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['enrich.get_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_enrich/policy' + } + const meta: TransportRequestMetadata = { + name: 'enrich.get_policy', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an enrich policy. Creates an enrich policy. 
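A sketch of creating a `match`-type policy with the method below; `match` is one of the accepted body parameters (alongside `geo_match` and `range`), and the index, field, and policy names are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Define a policy that enriches documents by exact match on `email`.
await client.enrich.putPolicy({
  name: 'users-policy',
  match: {
    indices: 'users',
    match_field: 'email',
    enrich_fields: ['first_name', 'last_name', 'city']
  }
})
```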
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy | Elasticsearch API documentation} + */ + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise + async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['enrich.put_policy'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_enrich/policy/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'enrich.put_policy', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'geo_match', + 'match', + 'range', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats | Elasticsearch API documentation} + */ + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['enrich.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_enrich/_stats' + const meta: TransportRequestMetadata = { + name: 'enrich.stats', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts new file mode 100644 index 000000000..4a0f403ff --- /dev/null +++ b/src/api/api/eql.ts @@ -0,0 +1,334 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Eql { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'eql.delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'eql.get': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'eql.get_status': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'eql.search': { + path: [ + 'index' + ], + body: [ + 'query', + 'case_sensitive', + 'event_category_field', + 'tiebreaker_field', + 'timestamp_field', + 'fetch_size', + 'filter', + 'keep_alive', + 'keep_on_completion', + 'wait_for_completion_timeout', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'size', + 'fields', + 'result_position', + 'runtime_mappings', + 'max_samples_per_key' + ], + query: [ + 'allow_no_indices', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'expand_wildcards', + 'ccs_minimize_roundtrips', + 'ignore_unavailable', + 'keep_alive', + 'keep_on_completion', + 'project_routing', + 'wait_for_completion_timeout' + ] + } + } + } + + /** + * Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. 
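A short sketch of the delete call defined below; the search ID is a placeholder obtained from an earlier async EQL search response.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Delete the stored search and its results by ID.
await client.eql.delete({ id: 'my-async-eql-search-id' })
```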
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete | Elasticsearch API documentation} + */ + async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['eql.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_eql/search/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'eql.delete', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get | Elasticsearch API documentation} + */ + async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise> + async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['eql.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_eql/search/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'eql.get', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'keep_alive', + 'wait_for_completion_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. 
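As a sketch of polling with the status endpoint below (search ID is a placeholder), the response flags indicate whether the search is still running and whether the available results are partial.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const status = await client.eql.getStatus({ id: 'my-async-eql-search-id' })
// `is_running` and `is_partial` tell you whether results are complete yet.
console.log(status.is_running, status.is_partial)
```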
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status | Elasticsearch API documentation} + */ + async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise + async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['eql.get_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_eql/search/status/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'eql.get_status', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search | Elasticsearch API documentation} + */ + async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise> + async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['eql.search'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 
'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_eql/search` + const meta: TransportRequestMetadata = { + name: 'eql.search', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'query', + 'case_sensitive', + 'event_category_field', + 'tiebreaker_field', + 'timestamp_field', + 'fetch_size', + 'filter', + 'keep_alive', + 'keep_on_completion', + 'wait_for_completion_timeout', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'size', + 'fields', + 'result_position', + 'runtime_mappings', + 'max_samples_per_key', + 'allow_no_indices', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'expand_wildcards', + 'ccs_minimize_roundtrips', + 'ignore_unavailable', + 'keep_alive', + 'keep_on_completion', + 'project_routing', + 'wait_for_completion_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts new file mode 100644 index 000000000..c6e87d7d1 --- /dev/null +++ b/src/api/api/esql.ts @@ -0,0 +1,507 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Esql { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'esql.async_query': { + path: [], + body: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata', + 'include_execution_metadata', + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion' + ], + query: [ + 'allow_partial_results', + 'delimiter', + 'drop_null_columns', + 'format' + ] + }, + 'esql.async_query_delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'esql.async_query_get': { + path: [ + 'id' + ], + body: [], + query: [ + 'drop_null_columns', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'esql.async_query_stop': { + path: [ + 'id' + ], + body: [], + query: [ + 'drop_null_columns' + ] + }, + 'esql.get_query': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'esql.list_queries': { + path: [], + body: [], + query: [] + }, + 'esql.query': { + path: [], + body: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata', + 'include_execution_metadata' + ], + query: [ + 'format', + 'delimiter', + 'drop_null_columns', + 'allow_partial_results' + ] + } + } + } + + /** + * Run an async ES|QL query. 
Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query | Elasticsearch API documentation} + */ + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise + async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['esql.async_query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_query/async' + const meta: TransportRequestMetadata = { + name: 'esql.async_query', + acceptedParams: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata', + 'include_execution_metadata', + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion', + 'allow_partial_results', + 'delimiter', + 'drop_null_columns', + 'format' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted. If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: * The authenticated user that submitted the original query request * Users with the `cancel_task` cluster privilege + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete | Elasticsearch API documentation} + */ + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise + async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['esql.async_query_delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_query/async/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'esql.async_query_delete', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get | Elasticsearch API documentation} + */ + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise + async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['esql.async_query_get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_query/async/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'esql.async_query_get', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'drop_null_columns', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Stop async ES|QL query. This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. 
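A sketch of the stop flow, under the assumption that the async query response exposes an `id` while the query is still running; the index name and ES|QL statement are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Submit an async ES|QL query, then stop it early and take partial results.
const submitted = await client.esql.asyncQuery({
  query: 'FROM my-index | STATS count = COUNT(*)',
  keep_on_completion: true
})
if (submitted.id != null) {
  const partial = await client.esql.asyncQueryStop({ id: submitted.id })
  console.log(partial)
}
```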
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-stop | Elasticsearch API documentation} + */ + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlAsyncQueryStopResponse> + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlAsyncQueryStopResponse, unknown>> + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise<T.EsqlAsyncQueryStopResponse> + async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['esql.async_query_stop'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_query/async/${encodeURIComponent(params.id.toString())}/stop` + const meta: TransportRequestMetadata = { + name: 'esql.async_query_stop', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'drop_null_columns' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get information about a specific running ES|QL query. Returns an object with extended information about a running ES|QL query. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-get-query | Elasticsearch API documentation} + */ + async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.EsqlGetQueryResponse> + async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.EsqlGetQueryResponse, unknown>> + async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptions): Promise<T.EsqlGetQueryResponse> + async getQuery (this: That, params: T.EsqlGetQueryRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['esql.get_query'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_query/queries/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'esql.get_query', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get information about running ES|QL queries. Returns an object containing IDs and other information about the running ES|QL queries.
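A minimal sketch of the listing call below; it accepts no parameters, so the method can be invoked with no arguments.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// List the ES|QL queries currently running on the cluster.
const running = await client.esql.listQueries()
console.log(running)
```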
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-list-queries | Elasticsearch API documentation} + */ + async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptions): Promise + async listQueries (this: That, params?: T.EsqlListQueriesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['esql.list_queries'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_query/queries' + const meta: TransportRequestMetadata = { + name: 'esql.list_queries', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. + * @see {@link https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest | Elasticsearch API documentation} + */ + async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise + async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['esql.query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_query' + const meta: TransportRequestMetadata = { + name: 'esql.query', + acceptedParams: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata', + 'include_execution_metadata', + 'format', + 'delimiter', + 'drop_null_columns', + 'allow_partial_results' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts new file mode 100644 index 000000000..931870b88 --- /dev/null +++ b/src/api/api/exists.ts @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + exists: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} + +/** + * Check a document. Verify that a document exists. For example, check to see if a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You can use the `version` parameter to check the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} + */ +export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise +export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.exists + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'HEAD' + const path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'exists', + pathParts: { + id: params.id, + index: params.index + }, + acceptedParams: [ + 'id', + 'index', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts new file mode 100644 index 000000000..5f03302ea --- /dev/null +++ b/src/api/api/exists_source.ts @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + exists_source: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'version', + 'version_type' + ] + } +} + +/** + * Check for a document source. Check whether a document source exists in an index. For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not available if it is disabled in the mapping. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} + */ +export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise +export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.exists_source + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'HEAD' + const path = `/${encodeURIComponent(params.index.toString())}/_source/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'exists_source', + pathParts: { + id: params.id, + index: params.index + }, + acceptedParams: [ + 'id', + 'index', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'version', + 'version_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts new file mode 100644 index 000000000..e8edd8afa --- /dev/null +++ b/src/api/api/explain.ts @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + explain: { + path: [ + 'id', + 'index' + ], + body: [ + 'query' + ], + query: [ + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'lenient', + 'preference', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'q' + ] + } +} + +/** + * Explain a document match result. Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain | Elasticsearch API documentation} + */ +export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise> +export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.explain + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_explain/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'explain', + pathParts: { + id: params.id, + index: params.index + }, + acceptedParams: [ + 'id', + 'index', + 'query', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'lenient', + 'preference', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'q' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/features.ts b/src/api/api/features.ts new file mode 100644 index 000000000..6e6d0dac8 --- /dev/null +++ b/src/api/api/features.ts @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +export default class Features { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'features.get_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'features.reset_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } + } + + /** + * Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. The features listed by this API are a combination of built-in features and features defined by plugins. 
In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features | Elasticsearch API documentation} + */ + async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise + async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['features.get_features'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_features' + const meta: TransportRequestMetadata = { + name: 'features.get_features', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. WARNING: Intended for development and testing use only. Do not reset features on a production cluster. Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature. Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. To list the features that will be affected, use the get features API. IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. 
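+ * A minimal usage sketch (editorial illustration, not generated text; assumes a
+ * configured `client` instance of this package, pointed at a disposable dev/test cluster):
+ * ```
+ * const response = await client.features.resetFeatures()
+ * console.log(response)
+ * ```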
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features | Elasticsearch API documentation} + */ + async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise + async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['features.reset_features'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_features/_reset' + const meta: TransportRequestMetadata = { + name: 'features.reset_features', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts new file mode 100644 index 000000000..000a51899 --- /dev/null +++ b/src/api/api/field_caps.ts @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + field_caps: { + path: [ + 'index' + ], + body: [ + 'fields', + 'index_filter', + 'runtime_mappings' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'fields', + 'ignore_unavailable', + 'include_unmapped', + 'filters', + 'types', + 'include_empty_fields', + 'project_routing' + ] + } +} + +/** + * Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. 
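+ * A minimal usage sketch (editorial illustration; assumes a configured `client` and a
+ * hypothetical index name):
+ * ```
+ * const caps = await client.fieldCaps({ index: 'my-index-000001', fields: ['rating', '*.id'] })
+ * console.log(caps.fields)
+ * ```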
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps | Elasticsearch API documentation} + */ +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise +export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.field_caps + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_field_caps` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_field_caps' + } + const meta: TransportRequestMetadata = { + name: 'field_caps', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'fields', + 'index_filter', + 'runtime_mappings', + 'allow_no_indices', + 'expand_wildcards', + 'fields', + 'ignore_unavailable', + 'include_unmapped', + 'filters', + 'types', + 'include_empty_fields', + 'project_routing' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts new file mode 100644 index 000000000..6eae3eb52 --- /dev/null +++ b/src/api/api/fleet.ts @@ -0,0 +1,574 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
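+// Editorial usage sketch (not part of the generated file): the Fleet APIs defined
+// below are exposed on the client as `client.fleet.*`. For example, assuming a
+// configured `client` and a hypothetical index name:
+//
+//   const checkpoints = await client.fleet.globalCheckpoints({ index: 'my-index-000001' })
+//   console.log(checkpoints.global_checkpoints)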
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Fleet { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'fleet.delete_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.get_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.global_checkpoints': { + path: [ + 'index' + ], + body: [], + query: [ + 'wait_for_advance', + 'wait_for_index', + 'checkpoints', + 'timeout' + ] + }, + 'fleet.msearch': { + path: [ + 'index' + ], + body: [ + 'searches' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + }, + 'fleet.post_secret': { + path: [], + body: [], + query: [] + }, + 'fleet.search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + } + } + } + + /** + * Deletes a secret stored by Fleet + */ + async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['fleet.delete_secret'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_fleet/secret/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'fleet.delete_secret', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves a secret stored by Fleet + */ + async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['fleet.get_secret'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_fleet/secret/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'fleet.get_secret', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet | Elasticsearch API documentation} + */ + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise + async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['fleet.global_checkpoints'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_fleet/global_checkpoints` + const meta: TransportRequestMetadata = { + name: 'fleet.global_checkpoints', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'wait_for_advance', + 'wait_for_index', + 'checkpoints', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run multiple Fleet searches. Run several Fleet searches with a single API request. The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch | Elasticsearch API documentation} + */ + async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise> + async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['fleet.msearch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_fleet/_fleet_msearch` + } else { + method = body != null ? 
'POST' : 'GET' + path = '/_fleet/_fleet_msearch' + } + const meta: TransportRequestMetadata = { + name: 'fleet.msearch', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'searches', + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) + } + + /** + * Creates a secret stored by Fleet + */ + async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['fleet.post_secret'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_fleet/secret' + const meta: TransportRequestMetadata = { + name: 'fleet.post_secret', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search | Elasticsearch API documentation} + */ + async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise> + async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['fleet.search'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_fleet/_fleet_search` + const meta: TransportRequestMetadata = { + name: 'fleet.search', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats', + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/get.ts b/src/api/api/get.ts new file mode 100644 index 000000000..80f660e0d --- /dev/null +++ b/src/api/api/get.ts @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_exclude_vectors', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} + +/** + * Get a document by its ID. Get a document and its source or stored fields from an index. 
By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. **Source filtering** By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. You can turn off `_source` retrieval by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false ``` If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma-separated list of fields or wildcard expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities ``` If you only want to specify includes, you can use a shorter notation: ``` GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified. **Distributed** The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be. **Versioning support** You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} + */ +export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise> +export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.get + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'get', + pathParts: { + id: params.id, + index: params.index + }, + acceptedParams: [ + 'id', + 'index', + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_exclude_vectors', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts new file mode 100644 index 000000000..863fc3260 --- /dev/null +++ b/src/api/api/get_script.ts @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script: { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout' + ] + } +} + +/** + * Get a script or search template. Retrieves a stored script or search template. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script | Elasticsearch API documentation} + */ +export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise +export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.get_script + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_scripts/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'get_script', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts new file mode 100644 index 000000000..e80b010bd --- /dev/null +++ b/src/api/api/get_script_context.ts @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script_context: { + path: [], + body: [], + query: [] + } +} + +/** + * Get script contexts. Get a list of supported script contexts and their methods. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context | Elasticsearch API documentation} + */ +export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise +export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.get_script_context + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_script_context' + const meta: TransportRequestMetadata = { + name: 'get_script_context', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts new file mode 100644 index 000000000..3b3827019 --- /dev/null +++ b/src/api/api/get_script_languages.ts @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script_languages: { + path: [], + body: [], + query: [] + } +} + +/** + * Get script languages. Get a list of available script types, languages, and contexts. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages | Elasticsearch API documentation} + */ +export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise +export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.get_script_languages + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_script_language' + const meta: TransportRequestMetadata = { + name: 'get_script_languages', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts new file mode 100644 index 000000000..3f30b7030 --- /dev/null +++ b/src/api/api/get_source.ts @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_source: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'version', + 'version_type' + ] + } +} + +/** + * Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 ``` You can use the source filtering parameters to control which parts of the `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ``` + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} + */ +export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise> +export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.get_source + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_source/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'get_source', + pathParts: { + id: params.id, + index: params.index + }, + acceptedParams: [ + 'id', + 'index', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'version', + 'version_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts new file mode 100644 index 000000000..580a73e4a --- /dev/null +++ b/src/api/api/graph.ts @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Graph { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'graph.explore': { + path: [ + 'index' + ], + body: [ + 'connections', + 'controls', + 'query', + 'vertices' + ], + query: [ + 'routing', + 'timeout' + ] + } + } + } + + /** + * Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one or more vertices of interest. You can exclude vertices that have already been returned. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph | Elasticsearch API documentation} + */ + async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise> + async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise + async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['graph.explore'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ?
'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_graph/explore` + const meta: TransportRequestMetadata = { + name: 'graph.explore', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'connections', + 'controls', + 'query', + 'vertices', + 'routing', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts new file mode 100644 index 000000000..b9de60dd1 --- /dev/null +++ b/src/api/api/health_report.ts @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + health_report: { + path: [ + 'feature' + ], + body: [], + query: [ + 'timeout', + 'verbose', + 'size' + ] + } +} + +/** + * Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. 
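+ * A minimal polling sketch (editorial illustration; assumes a configured `client`).
+ * Note `verbose: false`, per the guidance above for frequent automated checks:
+ * ```
+ * const report = await client.healthReport({ verbose: false })
+ * console.log(report.status, report.indicators)
+ * ```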
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report | Elasticsearch API documentation} + */ +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise +export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.health_report + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.feature != null) { + method = 'GET' + path = `/_health_report/${encodeURIComponent(params.feature.toString())}` + } else { + method = 'GET' + path = '/_health_report' + } + const meta: TransportRequestMetadata = { + name: 'health_report', + pathParts: { + feature: params.feature + }, + acceptedParams: [ + 'feature', + 'timeout', + 'verbose', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts new file mode 100644 index 000000000..70c52ab89 --- /dev/null +++ b/src/api/api/ilm.ts @@ -0,0 +1,720 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
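+// Editorial usage sketch (not part of the generated file): the ILM APIs defined below
+// are exposed on the client as `client.ilm.*`. For example, assuming a configured
+// `client`:
+//
+//   const { operation_mode } = await client.ilm.getStatus()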
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Ilm { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'ilm.delete_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.explain_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'only_errors', + 'only_managed', + 'master_timeout' + ] + }, + 'ilm.get_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.get_status': { + path: [], + body: [], + query: [] + }, + 'ilm.migrate_to_data_tiers': { + path: [], + body: [ + 'legacy_template_to_delete', + 'node_attribute' + ], + query: [ + 'dry_run', + 'master_timeout' + ] + }, + 'ilm.move_to_step': { + path: [ + 'index' + ], + body: [ + 'current_step', + 'next_step' + ], + query: [] + }, + 'ilm.put_lifecycle': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.remove_policy': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.retry': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.start': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.stop': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } + } + + /** + * Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle | Elasticsearch API documentation} + */ + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ilm.delete_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ilm/policy/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'ilm.delete_lifecycle', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'policy', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle | Elasticsearch API documentation} + */ + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise + async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ilm.explain_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_ilm/explain` + const meta: TransportRequestMetadata = { + name: 'ilm.explain_lifecycle', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'only_errors', + 'only_managed', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get lifecycle policies. 
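+ * A minimal usage sketch (editorial illustration; assumes a configured `client` and a
+ * hypothetical policy name; omit `name` to list every policy):
+ * ```
+ * const policies = await client.ilm.getLifecycle({ name: 'my_policy' })
+ * console.log(policies)
+ * ```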
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle | Elasticsearch API documentation} + */ + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise + async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ilm.get_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_ilm/policy/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_ilm/policy' + } + const meta: TransportRequestMetadata = { + name: 'ilm.get_lifecycle', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'policy', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the ILM status. Get the current index lifecycle management status. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status | Elasticsearch API documentation} + */ + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise + async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ilm.get_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_ilm/status' + const meta: TransportRequestMetadata = { + name: 'ilm.get_status', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. 
Using node roles enables ILM to automatically move the indices between data tiers. Migrating away from custom node attribute routing can also be performed manually. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: 1. Stop setting the custom hot attribute on new indices. 1. Remove custom allocation settings from existing ILM policies. 1. Replace custom allocation settings from existing indices with the corresponding tier preference. ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers | Elasticsearch API documentation} + */ + async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMigrateToDataTiersResponse> + async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMigrateToDataTiersResponse, unknown>> + async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise<T.IlmMigrateToDataTiersResponse> + async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ilm.migrate_to_data_tiers'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_ilm/migrate_to_data_tiers' + const meta: TransportRequestMetadata = { + name: 'ilm.migrate_to_data_tiers', + acceptedParams: [ + 'legacy_template_to_delete', + 'node_attribute', + 'dry_run', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step. WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and it should be considered an expert-level API. You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index. This is to prevent the index from being moved from an unexpected step into the next step. When specifying the target (`next_step`) to which the index will be moved, the phase is required; the name field, or both the action and name fields, are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. 
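(For instance, as an illustrative sketch rather than generated output, with hypothetical index and step values: `await client.ilm.moveToStep({ index: 'my-index', current_step: { phase: 'new', action: 'complete', name: 'complete' }, next_step: { phase: 'warm' } })` names only a target phase, so the index moves to the first step of the warm phase.)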
If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step | Elasticsearch API documentation} + */ + async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmMoveToStepResponse> + async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmMoveToStepResponse, unknown>> + async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<T.IlmMoveToStepResponse> + async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ilm.move_to_step'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ilm/move/${encodeURIComponent(params.index.toString())}` + const meta: TransportRequestMetadata = { + name: 'ilm.move_to_step', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'current_step', + 'next_step' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented. NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle | Elasticsearch API documentation} + */ + async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmPutLifecycleResponse> + async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmPutLifecycleResponse, unknown>> + async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<T.IlmPutLifecycleResponse> + async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ilm.put_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ilm/policy/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'ilm.put_lifecycle', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'policy', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy | Elasticsearch API documentation} + */ + async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRemovePolicyResponse> + async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRemovePolicyResponse, unknown>> + async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<T.IlmRemovePolicyResponse> + async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['ilm.remove_policy'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_ilm/remove` + const meta: TransportRequestMetadata = { + name: 'ilm.remove_policy', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry | Elasticsearch API documentation} + */ + async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmRetryResponse> + async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmRetryResponse, unknown>> + async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise<T.IlmRetryResponse> + async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['ilm.retry'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_ilm/retry` + const meta: TransportRequestMetadata = { + name: 'ilm.retry', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start | Elasticsearch API documentation} + */ + async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStartResponse> + async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStartResponse, unknown>> + async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise<T.IlmStartResponse> + async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['ilm.start'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_ilm/start' + const meta: TransportRequestMetadata = { + name: 'ilm.start', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. 
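+ * + * @example + * // An illustrative sketch, not part of the generated source: stop ILM, then poll + * // the get ILM status API until the reported operation mode is STOPPED. + * // Assumes a configured `client` instance. + * await client.ilm.stop() + * let mode = (await client.ilm.getStatus()).operation_mode + * while (mode !== 'STOPPED') { + *   await new Promise(resolve => setTimeout(resolve, 1000)) // simple fixed 1s backoff + *   mode = (await client.ilm.getStatus()).operation_mode + * }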
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop | Elasticsearch API documentation} + */ + async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IlmStopResponse> + async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IlmStopResponse, unknown>> + async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise<T.IlmStopResponse> + async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['ilm.stop'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_ilm/stop' + const meta: TransportRequestMetadata = { + name: 'ilm.stop', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/index.ts b/src/api/api/index.ts new file mode 100644 index 000000000..e86110ed8 --- /dev/null +++ b/src/api/api/index.ts @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = { + index: { + path: [ + 'id', + 'index' + ], + body: [ + 'document' + ], + query: [ + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'op_type', + 'pipeline', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] + } +} + +/** + * Create or update a document in an index. Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. 
* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns, or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow, or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Optimistic concurrency control** Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** By default, shard placement, or routing, is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. 
If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards` request parameter. Valid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable, use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors, like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. 
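(As an illustrative sketch of an external-versioning call from this client, not generated output and mirroring the REST example below: `await client.index({ index: 'my-index-000001', id: '1', version: 2, version_type: 'external', document: { user: { id: 'elkbee' } } })`.)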
NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external { "user": { "id": "elkbee" } } ``` In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create | Elasticsearch API documentation} + */ +export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndexResponse> +export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndexResponse, unknown>> +export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<T.IndexResponse> +export default async function IndexApi<TDocument = unknown> (this: That, params: T.IndexRequest<TDocument>, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.index + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null && params.id != null) { + method = 'PUT' + path = `/${encodeURIComponent(params.index.toString())}/_doc/${encodeURIComponent(params.id.toString())}` + } else { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/_doc` + } + const meta: TransportRequestMetadata = { + name: 'index', + pathParts: { + id: params.id, + index: params.index + }, + acceptedParams: [ + 'id', + 'index', + 'document', + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'op_type', + 'pipeline', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts new file mode 100644 index 000000000..cb8478cdb --- /dev/null +++ b/src/api/api/indices.ts @@ -0,0 +1,5500 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }> +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Indices { + transport: Transport + [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }> + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'indices.add_block': { + path: [ + 'index', + 'block' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.analyze': { + path: [ + 'index' + ], + body: [ + 'analyzer', + 'attributes', + 'char_filter', + 'explain', + 'field', + 'filter', + 'normalizer', + 'text', + 'tokenizer' + ], + query: [ + 'index' + ] + }, + 'indices.cancel_migrate_reindex': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'fielddata', + 'fields', + 'ignore_unavailable', + 'query', + 'request' + ] + }, + 'indices.clone': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.close': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create': { + path: [ + 'index' + ], + body: [ + 'aliases', + 'mappings', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create_data_stream': { + path: [ + 'name' + ], + body: [], 
query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.create_from': { + path: [ + 'source', + 'dest' + ], + body: [ + 'create_from' + ], + query: [] + }, + 'indices.data_streams_stats': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards' + ] + }, + 'indices.delete': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_alias': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'expand_wildcards' + ] + }, + 'indices.delete_data_stream_options': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_sample_configuration': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.disk_usage': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'run_expensive_tasks' + ] + }, + 'indices.downsample': { + path: [ + 'index', + 'target_index' + ], + body: [ + 'config' + ], + query: [] + }, + 'indices.exists': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local' + ] + }, + 'indices.exists_alias': { + path: [ + 'name', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'indices.exists_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'flat_settings', + 'master_timeout' + ] + }, + 'indices.exists_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.explain_data_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.field_usage_stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'fields' + ] + }, + 'indices.flush': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'force', + 'ignore_unavailable', + 'wait_if_ongoing' + ] + }, + 'indices.forcemerge': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'max_num_segments', + 'only_expunge_deletes', + 'wait_for_completion' + ] + }, + 'indices.get': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout', + 'features' + ] + }, + 'indices.get_alias': { + path: [ + 'name', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'indices.get_all_sample_configuration': { + path: [], + body: [], + query: [ + 
'master_timeout' + ] + }, + 'indices.get_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.get_data_lifecycle_stats': { + path: [], + body: [], + query: [] + }, + 'indices.get_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout', + 'verbose' + ] + }, + 'indices.get_data_stream_mappings': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'indices.get_data_stream_options': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'master_timeout' + ] + }, + 'indices.get_data_stream_settings': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'indices.get_field_mapping': { + path: [ + 'fields', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'include_defaults' + ] + }, + 'indices.get_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'flat_settings', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.get_mapping': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout' + ] + }, + 'indices.get_migrate_reindex_status': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.get_sample': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.get_sample_configuration': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'indices.get_sample_stats': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.get_settings': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'indices.get_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.migrate_reindex': { + path: [], + body: [ + 'reindex' + ], + query: [] + }, + 'indices.migrate_to_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.modify_data_stream': { + path: [], + body: [ + 'actions' + ], + query: [] + }, + 'indices.open': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.promote_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'indices.put_alias': { + path: [ + 'index', + 'name' + ], + body: [ + 'filter', + 'index_routing', + 'is_write_index', + 'routing', + 'search_routing' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_data_lifecycle': { + path: [ + 'name' + ], + body: [ + 'data_retention', + 'downsampling', + 'enabled' + ], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_data_stream_mappings': { + path: [ + 'name' + ], + body: [ + 'mappings' + ], + query: [ + 'dry_run', + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_data_stream_options': { + path: [ + 'name' + ], + body: [ + 'failure_store' + ], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_data_stream_settings': { + path: [ + 'name' + ], + body: [ + 'settings' + ], + query: [ + 'dry_run', + 
'master_timeout', + 'timeout' + ] + }, + 'indices.put_index_template': { + path: [ + 'name' + ], + body: [ + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'allow_auto_create', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 'master_timeout', + 'cause' + ] + }, + 'indices.put_mapping': { + path: [ + 'index' + ], + body: [ + 'date_detection', + 'dynamic', + 'dynamic_date_formats', + 'dynamic_templates', + '_field_names', + '_meta', + 'numeric_detection', + 'properties', + '_routing', + '_source', + 'runtime' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'write_index_only' + ] + }, + 'indices.put_sample_configuration': { + path: [ + 'index' + ], + body: [ + 'rate', + 'max_samples', + 'max_size', + 'time_to_live', + 'if' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_settings': { + path: [ + 'index' + ], + body: [ + 'settings' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'master_timeout', + 'preserve_existing', + 'reopen', + 'timeout' + ] + }, + 'indices.put_template': { + path: [ + 'name' + ], + body: [ + 'aliases', + 'index_patterns', + 'mappings', + 'order', + 'settings', + 'version' + ], + query: [ + 'create', + 'master_timeout', + 'order', + 'cause' + ] + }, + 'indices.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'detailed', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.refresh': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.reload_search_analyzers': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'resource' + ] + }, + 'indices.remove_block': { + path: [ + 'index', + 'block' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.resolve_cluster': { + path: [ + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'timeout' + ] + }, + 'indices.resolve_index': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'ignore_unavailable', + 'allow_no_indices', + 'mode', + 'project_routing' + ] + }, + 'indices.rollover': { + path: [ + 'alias', + 'new_index' + ], + body: [ + 'aliases', + 'conditions', + 'mappings', + 'settings' + ], + query: [ + 'dry_run', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'lazy' + ] + }, + 'indices.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.shard_stores': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'status' + ] + }, + 'indices.shrink': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.simulate_index_template': { + path: [ + 'name' + ], + body: [ + 'index_template' + ], + query: [ + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.simulate_template': { + path: [ + 'name' + ], + body: [ + 'allow_auto_create', + 'index_patterns', + 'composed_of', + 
'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.split': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.stats': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'completion_fields', + 'expand_wildcards', + 'fielddata_fields', + 'fields', + 'forbid_closed_indices', + 'groups', + 'include_segment_file_sizes', + 'include_unloaded_segments', + 'level' + ] + }, + 'indices.update_aliases': { + path: [], + body: [ + 'actions' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.validate_query': { + path: [ + 'index' + ], + body: [ + 'query' + ], + query: [ + 'allow_no_indices', + 'all_shards', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'explain', + 'ignore_unavailable', + 'lenient', + 'rewrite', + 'q' + ] + } + } + } + + /** + * Add an index block. Add an index block to an index. Index blocks limit the operations allowed on an index by blocking specific operation types. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block | Elasticsearch API documentation} + */ + async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAddBlockResponse> + async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAddBlockResponse, unknown>> + async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<T.IndicesAddBlockResponse> + async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.add_block'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}/_block/${encodeURIComponent(params.block.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.add_block', + pathParts: { + index: params.index, + block: params.block + }, + acceptedParams: [ + 'index', + 'block', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens. Generating an excessive amount of tokens may cause a node to run out of memory. The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. If more tokens than this limit are generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. 
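+ * + * @example + * // An illustrative sketch, not part of the generated source; assumes a configured `client`. + * // Analyze text with a built-in analyzer; no index is required. + * const result = await client.indices.analyze({ analyzer: 'standard', text: 'The quick brown fox' }) + * console.log(result.tokens?.map(t => t.token)) // [ 'the', 'quick', 'brown', 'fox' ]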
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze | Elasticsearch API documentation} + */ + async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesAnalyzeResponse> + async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesAnalyzeResponse, unknown>> + async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<T.IndicesAnalyzeResponse> + async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.analyze'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_analyze` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_analyze' + } + const meta: TransportRequestMetadata = { + name: 'indices.analyze', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'analyzer', + 'attributes', + 'char_filter', + 'explain', + 'field', + 'filter', + 'normalizer', + 'text', + 'tokenizer' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Cancel a migration reindex operation. Cancel a migration reindex attempt for a data stream or index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-cancel-migrate-reindex | Elasticsearch API documentation} + */ + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCancelMigrateReindexResponse> + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCancelMigrateReindexResponse, unknown>> + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise<T.IndicesCancelMigrateReindexResponse> + async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.cancel_migrate_reindex'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_migration/reindex/${encodeURIComponent(params.index.toString())}/_cancel` + const meta: TransportRequestMetadata = { + name: 'indices.cancel_migrate_reindex', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices. By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache | Elasticsearch API documentation} + */ + async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesClearCacheResponse> + async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesClearCacheResponse, unknown>> + async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<T.IndicesClearCacheResponse> + async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.clear_cache'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/_cache/clear` + } else { + method = 'POST' + path = '/_cache/clear' + } + const meta: TransportRequestMetadata = { + name: 'indices.clear_cache', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'fielddata', + 'fields', + 'ignore_unavailable', + 'query', + 'request' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. 
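(A minimal call sketch from this client, illustrative rather than generated output and with hypothetical index names: first mark the source read-only with `await client.indices.addBlock({ index: 'my-source', block: 'write' })`, then run `await client.indices.clone({ index: 'my-source', target: 'my-clone' })`.)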
The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created; then the previous write index can be cloned. NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. **Monitor the cloning process** The cloning process can be monitored with the cat recovery API, or you can use the cluster health API to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state `unassigned`. If, for any reason, the target index can't be allocated, its primary shard will remain `unassigned` until it can be allocated on that node. Once the primary shard is allocated, it moves to state `initializing`, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. **Wait for active shards** Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone | Elasticsearch API documentation} + */ + async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloneResponse> + async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloneResponse, unknown>> + async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise<T.IndicesCloneResponse> + async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.clone'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record<string, any> | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}/_clone/${encodeURIComponent(params.target.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.clone', + pathParts: { + index: params.index, + target: params.target + }, + acceptedParams: [ + 'index', + 'target', + 'aliases', + 'settings', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Close an index. A closed index is blocked for read or write operations and does not allow the operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk space, which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close | Elasticsearch API documentation} + */ + async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesCloseResponse> + async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesCloseResponse, unknown>> + async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise<T.IndicesCloseResponse> + async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise<any> { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.close'] + + const userQuery = params?.querystring + const querystring: Record<string, any> = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_close` + const meta: TransportRequestMetadata = { + name: 'indices.close', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following: * Settings for the index. * Mappings for fields in the index. * Index aliases. **Wait for active shards** By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened. For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If `acknowledged` is `false`, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If `shards_acknowledged` is `false`, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create | Elasticsearch API documentation} + */ + async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise + async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.create'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.create', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'aliases', + 'mappings', + 'settings', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a data stream. You must have a matching index template with data stream enabled. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream | Elasticsearch API documentation} + */ + async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise + async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.create_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.create_data_stream', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an index from a source index. Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. 
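+ *
+ * A minimal usage sketch (an illustrative assumption, not generated documentation: `client` is a configured `Client` from `@elastic/elasticsearch`, and the index names are placeholders):
+ *
+ * ```ts
+ * // const client = new Client({ node: 'http://localhost:9200' })
+ * const resp = await client.indices.createFrom({
+ *   source: 'my-source-index',
+ *   dest: 'my-dest-index'
+ *   // optional mapping/settings overrides can be passed via the `create_from` body property
+ * })
+ * ```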
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-from | Elasticsearch API documentation} + */ + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise + async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.create_from'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_create_from/${encodeURIComponent(params.source.toString())}/${encodeURIComponent(params.dest.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.create_from', + pathParts: { + source: params.source, + dest: params.dest + }, + acceptedParams: [ + 'source', + 'dest', + 'create_from' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get data stream stats. Get statistics for one or more data streams. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1 | Elasticsearch API documentation} + */ + async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise + async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.data_streams_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_stats` + } else { + method = 'GET' + path = '/_data_stream/_stats' + } + const meta: TransportRequestMetadata = { + name: 'indices.data_streams_stats', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'expand_wildcards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards. You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete | Elasticsearch API documentation} + */ + async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/${encodeURIComponent(params.index.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.delete', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete an alias. Removes a data stream or index from an alias. 
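+ *
+ * A minimal usage sketch (assumes a configured `client` instance; the index and alias names are placeholders):
+ *
+ * ```ts
+ * // const client = new Client({ node: 'http://localhost:9200' })
+ * const resp = await client.indices.deleteAlias({ index: 'my-index', name: 'my-alias' })
+ * console.log(resp.acknowledged)
+ * ```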
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias | Elasticsearch API documentation} + */ + async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise + async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.delete_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null && params.name != null) { + method = 'DELETE' + path = `/${encodeURIComponent(params.index.toString())}/_alias/${encodeURIComponent(params.name.toString())}` + } else { + method = 'DELETE' + path = `/${encodeURIComponent(params.index.toString())}/_aliases/${encodeURIComponent(params.name.toString())}` + } + const meta: TransportRequestMetadata = { + name: 'indices.delete_alias', + pathParts: { + index: params.index, + name: params.name + }, + acceptedParams: [ + 'index', + 'name', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle | Elasticsearch API documentation} + */ + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.delete_data_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` + const meta: TransportRequestMetadata = { + name: 'indices.delete_data_lifecycle', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete data streams. Deletes one or more data streams and their backing indices. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream | Elasticsearch API documentation} + */ + async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise + async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.delete_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.delete_data_stream', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'expand_wildcards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete data stream options. Removes the data stream options from a data stream. 
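+ *
+ * A minimal usage sketch (assumes a configured `client` instance; `my-data-stream` is a placeholder):
+ *
+ * ```ts
+ * // const client = new Client({ node: 'http://localhost:9200' })
+ * await client.indices.deleteDataStreamOptions({ name: 'my-data-stream' })
+ * ```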
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream-options | Elasticsearch API documentation} + */ + async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptions): Promise + async deleteDataStreamOptions (this: That, params: T.IndicesDeleteDataStreamOptionsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.delete_data_stream_options'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_options` + const meta: TransportRequestMetadata = { + name: 'indices.delete_data_stream_options', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete an index template. The provided name may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names must exactly match existing templates. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template | Elasticsearch API documentation} + */ + async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise + async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.delete_index_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_index_template/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.delete_index_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete sampling configuration. Delete the sampling configuration for the specified index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} + */ + async deleteSampleConfiguration (this: That, params: T.IndicesDeleteSampleConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSampleConfiguration (this: That, params: T.IndicesDeleteSampleConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSampleConfiguration (this: That, params: T.IndicesDeleteSampleConfigurationRequest, options?: TransportRequestOptions): Promise + async deleteSampleConfiguration (this: That, params: T.IndicesDeleteSampleConfigurationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.delete_sample_configuration'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/${encodeURIComponent(params.index.toString())}/_sample/config` + const meta: TransportRequestMetadata = { + name: 'indices.delete_sample_configuration', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. 
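+ *
+ * A minimal usage sketch (assumes a configured `client` instance; the template name is a placeholder):
+ *
+ * ```ts
+ * // const client = new Client({ node: 'http://localhost:9200' })
+ * await client.indices.deleteTemplate({ name: 'my-legacy-template' })
+ * ```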
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template | Elasticsearch API documentation} + */ + async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise + async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.delete_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_template/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.delete_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result for a small index can be inaccurate, as some parts of an index might not be analyzed by the API. NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. For usage examples, see the external documentation or refer to [Analyze the index disk usage example](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/index-disk-usage). + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage | Elasticsearch API documentation} + */ + async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> + async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise + async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.disk_usage'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_disk_usage` + const meta: TransportRequestMetadata = { + name: 'indices.disk_usage', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'run_expensive_tasks' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Downsample an index. Downsamples a time series (TSDS) index and reduces its size by keeping the last value or by pre-aggregating metrics: - When running in `aggregate` mode, it pre-calculates and stores statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval and their dimensions. - When running in `last_value` mode, it keeps the last value for each metric in the configured interval and their dimensions. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read-only (`index.blocks.write: true`). + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample | Elasticsearch API documentation} + */ + async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise + async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.downsample'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_downsample/${encodeURIComponent(params.target_index.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.downsample', + pathParts: { + index: params.index, + target_index: params.target_index + }, + acceptedParams: [ + 'index', + 'target_index', + 'config' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Check indices. Check if one or more indices, index aliases, or data streams exist. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists | Elasticsearch API documentation} + */ + async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise + async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.exists'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'HEAD' + const path = `/${encodeURIComponent(params.index.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.exists', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Check aliases. Check if one or more data stream or index aliases exist. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias | Elasticsearch API documentation} + */ + async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise + async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.exists_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null && params.name != null) { + method = 'HEAD' + path = `/${encodeURIComponent(params.index.toString())}/_alias/${encodeURIComponent(params.name.toString())}` + } else { + method = 'HEAD' + path = `/_alias/${encodeURIComponent(params.name.toString())}` + } + const meta: TransportRequestMetadata = { + name: 'indices.exists_alias', + pathParts: { + name: params.name, + index: params.index + }, + acceptedParams: [ + 'name', + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Check index templates. Check whether index templates exist. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template | Elasticsearch API documentation} + */ + async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise + async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.exists_index_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'HEAD' + const path = `/_index_template/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.exists_index_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'local', + 'flat_settings', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. 
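+ *
+ * A minimal usage sketch (assumes a configured `client` instance; the template name is a placeholder):
+ *
+ * ```ts
+ * // const client = new Client({ node: 'http://localhost:9200' })
+ * const exists = await client.indices.existsTemplate({ name: 'my-legacy-template' })
+ * // resolves to a boolean, reflecting the HEAD request semantics
+ * ```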
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template | Elasticsearch API documentation} + */ + async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise + async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.exists_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'HEAD' + const path = `/_template/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.exists_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'flat_settings', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle | Elasticsearch API documentation} + */ + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise + async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.explain_data_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_lifecycle/explain` + const meta: TransportRequestMetadata = { + name: 'indices.explain_data_lifecycle', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'include_defaults', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats | Elasticsearch API documentation} + */ + async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise + async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.field_usage_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_field_usage_stats` + const meta: TransportRequestMetadata = { + name: 'indices.field_usage_stats', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'fields' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. 
Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush | Elasticsearch API documentation} + */ + async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise> + async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise + async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.flush'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_flush` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_flush' + } + const meta: TransportRequestMetadata = { + name: 'indices.flush', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'force', + 'ignore_unavailable', + 'wait_if_ongoing' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. 
So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. **Blocks during a force merge** Calls to this API block until the merge is complete (unless the request contains `wait_for_completion=false`). If the client connection is lost before completion then the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete. **Running force merge asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you cannot cancel this task because the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at `_tasks/`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. **Force merging multiple indices** You can force merge multiple indices with a single request by targeting: * One or more data streams that contain multiple backing indices * Multiple indices * One or more aliases * All data streams and indices in a cluster Each targeted shard is force-merged separately using the `force_merge` threadpool. By default each node only has a single `force_merge` thread, which means that the shards on that node are force-merged one at a time. If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel. Force merge makes the storage for the shard being merged temporarily increase, as it may require free space of up to triple its size if the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one. **Data streams and time-based indices** Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 ``` + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge | Elasticsearch API documentation} + */ + async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise + async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.forcemerge'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ??
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/_forcemerge` + } else { + method = 'POST' + path = '/_forcemerge' + } + const meta: TransportRequestMetadata = { + name: 'indices.forcemerge', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'max_num_segments', + 'only_expunge_deletes', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get | Elasticsearch API documentation} + */ + async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.get', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout', + 'features' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get aliases. Retrieves information for one or more data stream or index aliases. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias | Elasticsearch API documentation} + */ + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise + async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null && params.name != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_alias/${encodeURIComponent(params.name.toString())}` + } else if (params.name != null) { + method = 'GET' + path = `/_alias/${encodeURIComponent(params.name.toString())}` + } else if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_alias` + } else { + method = 'GET' + path = '/_alias' + } + const meta: TransportRequestMetadata = { + name: 'indices.get_alias', + pathParts: { + name: params.name, + index: params.index + }, + acceptedParams: [ + 'name', + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get all sampling configurations. Get the sampling configurations for all indices. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} + */ + async getAllSampleConfiguration (this: That, params?: T.IndicesGetAllSampleConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAllSampleConfiguration (this: That, params?: T.IndicesGetAllSampleConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAllSampleConfiguration (this: That, params?: T.IndicesGetAllSampleConfigurationRequest, options?: TransportRequestOptions): Promise + async getAllSampleConfiguration (this: That, params?: T.IndicesGetAllSampleConfigurationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_all_sample_configuration'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_sample/config' + const meta: TransportRequestMetadata = { + name: 'indices.get_all_sample_configuration', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get data stream lifecycles. Get the data stream lifecycle configuration of one or more data streams. 
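+ *
+ * A minimal usage sketch (assumes a configured `client` instance; the data stream name is a placeholder):
+ *
+ * ```ts
+ * // const client = new Client({ node: 'http://localhost:9200' })
+ * const resp = await client.indices.getDataLifecycle({ name: 'my-data-stream' })
+ * ```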
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle | Elasticsearch API documentation} + */ + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise + async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_data_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` + const meta: TransportRequestMetadata = { + name: 'indices.get_data_lifecycle', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'include_defaults', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats | Elasticsearch API documentation} + */ + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise + async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_data_lifecycle_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_lifecycle/stats' + const meta: TransportRequestMetadata = { + name: 'indices.get_data_lifecycle_stats', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get data streams. Get information about one or more data streams. 
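+ *
+ * A minimal usage sketch (assumes a configured `client` instance; the name is a placeholder, and `name` can be omitted to list all data streams):
+ *
+ * ```ts
+ * // const client = new Client({ node: 'http://localhost:9200' })
+ * const resp = await client.indices.getDataStream({ name: 'my-data-stream' })
+ * ```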
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream | Elasticsearch API documentation} + */ + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise + async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_data_stream/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_data_stream' + } + const meta: TransportRequestMetadata = { + name: 'indices.get_data_stream', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'include_defaults', + 'master_timeout', + 'verbose' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get data stream mappings. Get mapping information for one or more data streams. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-mappings | Elasticsearch API documentation} + */ + async getDataStreamMappings (this: That, params: T.IndicesGetDataStreamMappingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamMappings (this: That, params: T.IndicesGetDataStreamMappingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamMappings (this: That, params: T.IndicesGetDataStreamMappingsRequest, options?: TransportRequestOptions): Promise + async getDataStreamMappings (this: That, params: T.IndicesGetDataStreamMappingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_data_stream_mappings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_mappings` + const meta: TransportRequestMetadata = { + name: 'indices.get_data_stream_mappings', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get data stream options. Get the data stream options configuration of one or more data streams. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-options | Elasticsearch API documentation} + */ + async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptions): Promise + async getDataStreamOptions (this: That, params: T.IndicesGetDataStreamOptionsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_data_stream_options'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_options` + const meta: TransportRequestMetadata = { + name: 'indices.get_data_stream_options', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get data stream settings. Get setting information for one or more data streams. 
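+ * @example
+ * // Illustrative usage sketch (editor's addition, not generated code): reads the
+ * // effective settings of a single data stream; assumes an instantiated `client`.
+ * const settings = await client.indices.getDataStreamSettings({ name: 'my-data-stream' })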
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream-settings | Elasticsearch API documentation} + */ + async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptions): Promise + async getDataStreamSettings (this: That, params: T.IndicesGetDataStreamSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_data_stream_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_settings` + const meta: TransportRequestMetadata = { + name: 'indices.get_data_stream_settings', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping | Elasticsearch API documentation} + */ + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise + async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_field_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null && params.fields != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_mapping/field/${encodeURIComponent(params.fields.toString())}` + } else { + method = 'GET' + path = `/_mapping/field/${encodeURIComponent(params.fields.toString())}` + } + const meta: TransportRequestMetadata = { + name: 'indices.get_field_mapping', + pathParts: { + fields: params.fields, + index: params.index + }, + acceptedParams: [ + 'fields', + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'include_defaults' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get index templates. Get information about one or more index templates. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template | Elasticsearch API documentation} + */ + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise + async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_index_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_index_template/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_index_template' + } + const meta: TransportRequestMetadata = { + name: 'indices.get_index_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'local', + 'flat_settings', + 'master_timeout', + 'include_defaults' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. 
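+ * @example
+ * // Illustrative usage sketch (editor's addition, not generated code): fetches the
+ * // mapping of one index; assumes an instantiated `client` and a hypothetical index name.
+ * const mapping = await client.indices.getMapping({ index: 'my-index' })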
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping | Elasticsearch API documentation} + */ + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise + async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_mapping` + } else { + method = 'GET' + path = '/_mapping' + } + const meta: TransportRequestMetadata = { + name: 'indices.get_mapping', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the migration reindexing status. Get the status of a migration reindex attempt for a data stream or index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-migration | Elasticsearch API documentation} + */ + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise + async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_migrate_reindex_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_migration/reindex/${encodeURIComponent(params.index.toString())}/_status` + const meta: TransportRequestMetadata = { + name: 'indices.get_migrate_reindex_status', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Request for a random sample of raw documents ingested into the given index or data stream. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} + */ + async getSample (this: That, params: T.IndicesGetSampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSample (this: That, params: T.IndicesGetSampleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSample (this: That, params: T.IndicesGetSampleRequest, options?: TransportRequestOptions): Promise + async getSample (this: That, params: T.IndicesGetSampleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_sample'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_sample` + const meta: TransportRequestMetadata = { + name: 'indices.get_sample', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get sampling configuration. Get the sampling configuration for the specified index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} + */ + async getSampleConfiguration (this: That, params: T.IndicesGetSampleConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSampleConfiguration (this: That, params: T.IndicesGetSampleConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSampleConfiguration (this: That, params: T.IndicesGetSampleConfigurationRequest, options?: TransportRequestOptions): Promise + async getSampleConfiguration (this: That, params: T.IndicesGetSampleConfigurationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_sample_configuration'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_sample/config` + const meta: TransportRequestMetadata = { + name: 'indices.get_sample_configuration', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Request stats for a random sample of raw documents ingested into the given index or data stream. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} + */ + async getSampleStats (this: That, params: T.IndicesGetSampleStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSampleStats (this: That, params: T.IndicesGetSampleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSampleStats (this: That, params: T.IndicesGetSampleStatsRequest, options?: TransportRequestOptions): Promise + async getSampleStats (this: That, params: T.IndicesGetSampleStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_sample_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_sample/stats` + const meta: TransportRequestMetadata = { + name: 'indices.get_sample_stats', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings | Elasticsearch API documentation} + */ + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null && params.name != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_settings/${encodeURIComponent(params.name.toString())}` + } else if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_settings` + } else if (params.name != null) { + method = 'GET' + path = `/_settings/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_settings' + } + const meta: TransportRequestMetadata = { + name: 'indices.get_settings', + pathParts: { + index: params.index, + name: params.name + }, + acceptedParams: [ + 'index', + 'name', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get legacy index templates. Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template | Elasticsearch API documentation} + */ + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise + async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.get_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_template/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_template' + } + const meta: TransportRequestMetadata = { + name: 'indices.get_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'flat_settings', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Reindex legacy backing indices. Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. 
The persistent task ID is returned immediately and the reindexing work is completed in that task. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-reindex | Elasticsearch API documentation} + */ + async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise + async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.migrate_reindex'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_migration/reindex' + const meta: TransportRequestMetadata = { + name: 'indices.migrate_reindex', + acceptedParams: [ + 'reindex' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream | Elasticsearch API documentation} + */ + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise + async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.migrate_to_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_data_stream/_migrate/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.migrate_to_data_stream', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update data streams. Performs one or more data stream modification actions in a single atomic operation. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream | Elasticsearch API documentation} + */ + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise + async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.modify_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_data_stream/_modify' + const meta: TransportRequestMetadata = { + name: 'indices.modify_data_stream', + acceptedParams: [ + 'actions' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Open a closed index. For data streams, the API opens any closed backing indices. A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. 
The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open | Elasticsearch API documentation} + */ + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise + async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.open'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_open` + const meta: TransportRequestMetadata = { + name: 'indices.open', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. 
This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream | Elasticsearch API documentation} + */ + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise + async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.promote_data_stream'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_data_stream/_promote/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.promote_data_stream', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update an alias. Adds a data stream or index to an alias. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias | Elasticsearch API documentation} + */ + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise + async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.put_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null && params.name != null) { + method = 'PUT' + path = `/${encodeURIComponent(params.index.toString())}/_alias/${encodeURIComponent(params.name.toString())}` + } else { + method = 'PUT' + path = `/${encodeURIComponent(params.index.toString())}/_aliases/${encodeURIComponent(params.name.toString())}` + } + const meta: TransportRequestMetadata = { + name: 'indices.put_alias', + pathParts: { + index: params.index, + name: params.name + }, + acceptedParams: [ + 'index', + 'name', + 'filter', + 'index_routing', + 'is_write_index', + 'routing', + 'search_routing', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle | Elasticsearch API documentation} + */ + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise + async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.put_data_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_lifecycle` + const meta: TransportRequestMetadata = { + name: 'indices.put_data_lifecycle', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'data_retention', + 'downsampling', + 'enabled', + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update data stream mappings. This API can be used to override mappings on specific data streams. These overrides will take precedence over what is specified in the template that the data stream matches. The mapping change is only applied to new write indices that are created during rollover after this API is called. No indices are changed by this API. 
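+ * @example
+ * // Illustrative usage sketch (editor's addition, not generated code): overrides a
+ * // field mapping on a data stream; assumes an instantiated `client`, and the
+ * // `mappings` shape shown is an assumption for illustration only.
+ * await client.indices.putDataStreamMappings({
+ *   name: 'my-data-stream',
+ *   mappings: { properties: { 'host.name': { type: 'keyword' } } }
+ * })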
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-mappings | Elasticsearch API documentation} + */ + async putDataStreamMappings (this: That, params: T.IndicesPutDataStreamMappingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamMappings (this: That, params: T.IndicesPutDataStreamMappingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamMappings (this: That, params: T.IndicesPutDataStreamMappingsRequest, options?: TransportRequestOptions): Promise + async putDataStreamMappings (this: That, params: T.IndicesPutDataStreamMappingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.put_data_stream_mappings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_mappings` + const meta: TransportRequestMetadata = { + name: 'indices.put_data_stream_mappings', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'mappings', + 'dry_run', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update data stream options. Update the data stream options of the specified data streams. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-options | Elasticsearch API documentation} + */ + async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptions): Promise + async putDataStreamOptions (this: That, params: T.IndicesPutDataStreamOptionsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.put_data_stream_options'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_options`
+    const meta: TransportRequestMetadata = {
+      name: 'indices.put_data_stream_options',
+      pathParts: {
+        name: params.name
+      },
+      acceptedParams: [
+        'name',
+        'failure_store',
+        'expand_wildcards',
+        'master_timeout',
+        'timeout'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+   * Update data stream settings. This API can be used to override settings on specific data streams. These overrides will take precedence over what is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state, only certain settings are allowed. If possible, the setting change is applied to all backing indices. Otherwise, it will be applied when the data stream is next rolled over.
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-stream-settings | Elasticsearch API documentation}
+   */
+  async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.IndicesPutDataStreamSettingsResponse>
+  async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.IndicesPutDataStreamSettingsResponse, unknown>>
+  async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptions): Promise<T.IndicesPutDataStreamSettingsResponse>
+  async putDataStreamSettings (this: That, params: T.IndicesPutDataStreamSettingsRequest, options?: TransportRequestOptions): Promise<T.IndicesPutDataStreamSettingsResponse> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['indices.put_data_stream_settings']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: any = params.body ?? undefined
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        // @ts-expect-error
+        body = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'PUT'
+    const path = `/_data_stream/${encodeURIComponent(params.name.toString())}/_settings`
+    const meta: TransportRequestMetadata = {
+      name: 'indices.put_data_stream_settings',
+      pathParts: {
+        name: params.name
+      },
+      acceptedParams: [
+        'name',
+        'settings',
+        'dry_run',
+        'master_timeout',
+        'timeout'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+   * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.
You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. **Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template | Elasticsearch API documentation} + */ + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise + async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.put_index_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_index_template/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.put_index_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'allow_auto_create', + 'ignore_missing_component_templates', + 'deprecated', + 'create', + 'master_timeout', + 'cause' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update field mappings. Add new fields to an existing data stream or index. You can use the update mapping API to: - Add a new field to an existing index - Update mappings for multiple indices in a single request - Add new properties to an object field - Enable multi-fields for an existing field - Update supported mapping parameters - Change a field's mapping using reindexing - Rename a field using a field alias Learn how to use the update mapping API with practical examples in the [Update mapping API examples](https://www.elastic.co/docs/manage-data/data-store/mapping/update-mappings-examples) guide. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping | Elasticsearch API documentation} + */ + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise + async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.put_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}/_mapping` + const meta: TransportRequestMetadata = { + name: 'indices.put_mapping', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'date_detection', + 'dynamic', + 'dynamic_date_formats', + 'dynamic_templates', + '_field_names', + '_meta', + 'numeric_detection', + 'properties', + '_routing', + '_source', + 'runtime', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'write_index_only' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update sampling configuration. 
Create or update the sampling configuration for the specified index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} + */ + async putSampleConfiguration (this: That, params: T.IndicesPutSampleConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSampleConfiguration (this: That, params: T.IndicesPutSampleConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSampleConfiguration (this: That, params: T.IndicesPutSampleConfigurationRequest, options?: TransportRequestOptions): Promise + async putSampleConfiguration (this: That, params: T.IndicesPutSampleConfigurationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.put_sample_configuration'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}/_sample/config` + const meta: TransportRequestMetadata = { + name: 'indices.put_sample_configuration', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'rate', + 'max_samples', + 'max_size', + 'time_to_live', + 'if', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. For performance optimization during bulk indexing, you can disable the refresh interval. Refer to [disable refresh interval](https://www.elastic.co/docs/deploy-manage/production-guidance/optimize-performance/indexing-speed#disable-refresh-interval) for an example. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example: ``` { "number_of_replicas": 1 } ``` Or you can use an `index` setting object: ``` { "index": { "number_of_replicas": 1 } } ``` Or you can use dot annotation: ``` { "index.number_of_replicas": 1 } ``` Or you can embed any of the aforementioned options in a `settings` object. For example: ``` { "settings": { "index": { "number_of_replicas": 1 } } } ``` NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. 
To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. Refer to [updating analyzers on existing indices](https://www.elastic.co/docs/manage-data/data-store/text-analysis/specify-an-analyzer#update-analyzers-on-existing-indices) for step-by-step examples. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings | Elasticsearch API documentation} + */ + async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise + async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.put_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'PUT' + path = `/${encodeURIComponent(params.index.toString())}/_settings` + } else { + method = 'PUT' + path = '/_settings' + } + const meta: TransportRequestMetadata = { + name: 'indices.put_settings', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'settings', + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'master_timeout', + 'preserve_existing', + 'reopen', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. 
Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index; in that case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the `order` parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template | Elasticsearch API documentation} + */ + async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise + async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.put_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_template/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.put_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'aliases', + 'index_patterns', + 'mappings', + 'order', + 'settings', + 'version', + 'create', + 'master_timeout', + 'cause' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. 
* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node, the information about the original recovery will not be shown in the recovery API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery | Elasticsearch API documentation} + */ + async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise + async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.recovery'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_recovery` + } else { + method = 'GET' + path = '/_recovery' + } + const meta: TransportRequestMetadata = { + name: 'indices.recovery', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'active_only', + 'detailed', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. In Elastic Cloud Serverless, the default refresh interval is 5 seconds across all indices. Refresh requests are synchronous and do not return a response until the refresh operation completes. Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. 
If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh | Elasticsearch API documentation} + */ + async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise> + async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise + async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.refresh'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_refresh` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_refresh' + } + const meta: TransportRequestMetadata = { + name: 'indices.refresh', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Reload search analyzers. Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices. IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. NOTE: This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. 
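+ * @example
+ * // Usage sketch (added for illustration; not emitted by the code generator).
+ * // Assumes a configured `client` instance of this package's Client class and a
+ * // hypothetical index 'my-index' whose search analyzer uses an updateable
+ * // `synonym_graph` token filter.
+ * const response = await client.indices.reloadSearchAnalyzers({ index: 'my-index' })
+ * console.log(response.reload_details)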
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers | Elasticsearch API documentation} + */ + async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise> + async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise + async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.reload_search_analyzers'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_reload_search_analyzers` + const meta: TransportRequestMetadata = { + name: 'indices.reload_search_analyzers', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'resource' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Remove an index block. Remove an index block from an index. Index blocks limit the operations allowed on an index by blocking specific operation types. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-remove-block | Elasticsearch API documentation} + */ + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptions): Promise + async removeBlock (this: That, params: T.IndicesRemoveBlockRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.remove_block'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/${encodeURIComponent(params.index.toString())}/_block/${encodeURIComponent(params.block.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.remove_block', + pathParts: { + index: params.index, + block: params.block + }, + acceptedParams: [ + 'index', + 'block', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. ## Note on backwards compatibility The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression `dummy*` to those remote clusters. Thus, if any errors occur, you may see a reference to that index expression even though you didn't request it. If this causes a problem, you can instead include an index expression like `*:*` to bypass the issue. ## Advantages of using this endpoint before a cross-cluster search You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). 
For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. ## Test availability of remote clusters The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it. You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. For example, use `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. The `connected` field in the response will indicate whether it was successful. If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster | Elasticsearch API documentation} + */ + async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise + async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.resolve_cluster'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_resolve/cluster/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_resolve/cluster' + } + const meta: TransportRequestMetadata = { + name: 'indices.resolve_cluster', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. 
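+ * @example
+ * // Usage sketch (added for illustration; not emitted by the code generator).
+ * // Assumes a configured `client`; 'logs-*' is a hypothetical pattern that may
+ * // match indices, aliases, and data streams.
+ * const response = await client.indices.resolveIndex({ name: 'logs-*', expand_wildcards: 'open' })
+ * console.log(response.indices, response.aliases, response.data_streams)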
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index | Elasticsearch API documentation} + */ + async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise + async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.resolve_index'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_resolve/index/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.resolve_index', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'expand_wildcards', + 'ignore_unavailable', + 'allow_no_indices', + 'mode', + 'project_routing' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Roll over to a new index. TIP: We recommend using the index lifecycle rollover action to automate rollovers. However, Serverless does not support Index Lifecycle Management (ILM), so don't use this approach in the Serverless context. The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. **Roll over a data stream** If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation. **Roll over an index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also sets `is_write_index` to `false` for the previous write index. **Roll over an index alias with one index** If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. **Increment index names for an alias** When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. 
This number is always six characters and zero-padded, regardless of the previous index's name. If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`. If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover | Elasticsearch API documentation} + */ + async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise> + async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise + async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.rollover'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.alias != null && params.new_index != null) { + method = 'POST' + path = `/${encodeURIComponent(params.alias.toString())}/_rollover/${encodeURIComponent(params.new_index.toString())}` + } else { + method = 'POST' + path = `/${encodeURIComponent(params.alias.toString())}/_rollover` + } + const meta: TransportRequestMetadata = { + name: 'indices.rollover', + pathParts: { + alias: params.alias, + new_index: params.new_index + }, + acceptedParams: [ + 'alias', + 'new_index', + 'aliases', + 'conditions', + 'mappings', + 'settings', + 'dry_run', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'lazy' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices. 
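+ * @example
+ * // Usage sketch (added for illustration; not emitted by the code generator).
+ * // Assumes a configured `client`; 'my-index' is hypothetical. Omit `index`
+ * // to get segment information for all indices.
+ * const response = await client.indices.segments({ index: 'my-index' })
+ * console.log(response.indices['my-index'].shards)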
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments | Elasticsearch API documentation} + */ + async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise + async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.segments'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_segments` + } else { + method = 'GET' + path = '/_segments' + } + const meta: TransportRequestMetadata = { + name: 'indices.segments', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices. The index shard stores API returns the following information: * The node on which each replica shard exists. * The allocation ID for each replica shard. * A unique ID for each replica shard. * Any errors encountered while opening the shard index or from an earlier failure. By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores | Elasticsearch API documentation} + */ + async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise> + async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise + async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.shard_stores'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_shard_stores` + } else { + method = 'GET' + path = '/_shard_stores' + } + const meta: TransportRequestMetadata = { + name: 'indices.shard_stores', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'status' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Shrink an index. Shrink an index into a new index with fewer primary shards. Before you can shrink an index: * The index must be read-only. * A copy of every shard in the index must reside on the same node. * The index must have a green health status. To make shard allocation easier, we recommend you also remove the index's replica shards. You can later re-add replica shards as part of the shrink operation. The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, or an index with 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. A shrink operation: * Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also, if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hard links do not work across disks. * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `index.routing.allocation.initial_recovery._id` index setting. IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: * The target index must not exist. * The source index must have more primary shards than the target index. * The number of primary shards in the target index must be a factor of the number of primary shards in the source index. * The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard. * The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index. 
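+ * @example
+ * // Usage sketch (added for illustration; not emitted by the code generator).
+ * // Assumes a configured `client` and a read-only 'my-source-index' with a copy
+ * // of every shard on one node; shrinks it into 'my-target-index' with two
+ * // primaries and clears the temporary allocation filter used for the shrink.
+ * const response = await client.indices.shrink({
+ *   index: 'my-source-index',
+ *   target: 'my-target-index',
+ *   settings: {
+ *     'index.number_of_shards': 2,
+ *     'index.number_of_replicas': 0,
+ *     'index.routing.allocation.require._name': null
+ *   }
+ * })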
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink | Elasticsearch API documentation} + */ + async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise> + async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise + async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.shrink'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}/_shrink/${encodeURIComponent(params.target.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.shrink', + pathParts: { + index: params.index, + target: params.target + }, + acceptedParams: [ + 'index', + 'target', + 'aliases', + 'settings', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template | Elasticsearch API documentation} + */ + async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise + async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.simulate_index_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_index_template/_simulate_index/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.simulate_index_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'index_template', + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Simulate an index template. Get the index configuration that would be applied by a particular index template. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template | Elasticsearch API documentation} + */ + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise + async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.simulate_template'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'POST' + path = `/_index_template/_simulate/${encodeURIComponent(params.name.toString())}` + } else { + method = 'POST' + path = '/_index_template/_simulate' + } + const meta: TransportRequestMetadata = { + name: 'indices.simulate_template', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'allow_auto_create', + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'ignore_missing_component_templates', + 'deprecated', + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Split an index. Split an index into a new index with more primary shards. Before you can split an index: * The index must be read-only. * The cluster health status must be green. You can make an index read-only with the following request using the add index block API: ``` PUT /my_source_index/_block/write ``` The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. 
The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. * The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split | Elasticsearch API documentation} + */ + async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise> + async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise + async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.split'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/${encodeURIComponent(params.index.toString())}/_split/${encodeURIComponent(params.target.toString())}` + const meta: TransportRequestMetadata = { + name: 'indices.split', + pathParts: { + index: params.index, + target: params.target + }, + acceptedParams: [ + 'index', + 'target', + 'aliases', + 'settings', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get index statistics. For data streams, the API retrieves statistics for the stream's backing indices. 
By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards. To get shard-level statistics, set the `level` parameter to `shards`. NOTE: When a shard moves to another node, its shard-level statistics are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats | Elasticsearch API documentation} + */ + async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['indices.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null && params.metric != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_stats/${encodeURIComponent(params.metric.toString())}` + } else if (params.metric != null) { + method = 'GET' + path = `/_stats/${encodeURIComponent(params.metric.toString())}` + } else if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_stats` + } else { + method = 'GET' + path = '/_stats' + } + const meta: TransportRequestMetadata = { + name: 'indices.stats', + pathParts: { + metric: params.metric, + index: params.index + }, + acceptedParams: [ + 'metric', + 'index', + 'completion_fields', + 'expand_wildcards', + 'fielddata_fields', + 'fields', + 'forbid_closed_indices', + 'groups', + 'include_segment_file_sizes', + 'include_unloaded_segments', + 'level' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update an alias. Adds a data stream or index to an alias. 
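+ * @example
+ * // Usage sketch (added for illustration; not emitted by the code generator).
+ * // Assumes a configured `client`; atomically swaps a hypothetical alias
+ * // 'logs-current' from 'logs-000001' to 'logs-000002' in one request.
+ * const response = await client.indices.updateAliases({
+ *   actions: [
+ *     { remove: { index: 'logs-000001', alias: 'logs-current' } },
+ *     { add: { index: 'logs-000002', alias: 'logs-current' } }
+ *   ]
+ * })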
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases | Elasticsearch API documentation} + */ + async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise + async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.update_aliases'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_aliases' + const meta: TransportRequestMetadata = { + name: 'indices.update_aliases', + acceptedParams: [ + 'actions', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Validate a query. Validates a query without running it. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query | Elasticsearch API documentation} + */ + async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise + async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['indices.validate_query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_validate/query` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_validate/query' + } + const meta: TransportRequestMetadata = { + name: 'indices.validate_query', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'query', + 'allow_no_indices', + 'all_shards', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'explain', + 'ignore_unavailable', + 'lenient', + 'rewrite', + 'q' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts new file mode 100644 index 000000000..045c1aa61 --- /dev/null +++ b/src/api/api/inference.ts @@ -0,0 +1,2637 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Inference { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'inference.chat_completion_unified': { + path: [ + 'inference_id' + ], + body: [ + 'chat_completion_request' + ], + query: [ + 'timeout' + ] + }, + 'inference.completion': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.delete': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [ + 'dry_run', + 'force' + ] + }, + 'inference.get': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [] + }, + 'inference.inference': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'query', + 'input', + 'input_type', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'inference_config' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_ai21': { + path: [ + 'task_type', + 'ai21_inference_id' + ], + body: [ + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_alibabacloud': { + path: [ + 'task_type', + 'alibabacloud_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_amazonbedrock': { + path: [ + 'task_type', + 'amazonbedrock_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 
'inference.put_amazonsagemaker': { + path: [ + 'task_type', + 'amazonsagemaker_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_anthropic': { + path: [ + 'task_type', + 'anthropic_inference_id' + ], + body: [ + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_azureaistudio': { + path: [ + 'task_type', + 'azureaistudio_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_azureopenai': { + path: [ + 'task_type', + 'azureopenai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_cohere': { + path: [ + 'task_type', + 'cohere_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_contextualai': { + path: [ + 'task_type', + 'contextualai_inference_id' + ], + body: [ + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_custom': { + path: [ + 'task_type', + 'custom_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_deepseek': { + path: [ + 'task_type', + 'deepseek_inference_id' + ], + body: [ + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_elasticsearch': { + path: [ + 'task_type', + 'elasticsearch_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_elser': { + path: [ + 'task_type', + 'elser_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_googleaistudio': { + path: [ + 'task_type', + 'googleaistudio_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_googlevertexai': { + path: [ + 'task_type', + 'googlevertexai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_hugging_face': { + path: [ + 'task_type', + 'huggingface_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_jinaai': { + path: [ + 'task_type', + 'jinaai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_llama': { + path: [ + 'task_type', + 'llama_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_mistral': { + path: [ + 'task_type', + 'mistral_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_openai': { + path: [ + 'task_type', + 'openai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_voyageai': { + path: [ + 'task_type', + 'voyageai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', 
+ 'service_settings', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put_watsonx': { + path: [ + 'task_type', + 'watsonx_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.rerank': { + path: [ + 'inference_id' + ], + body: [ + 'query', + 'input', + 'return_documents', + 'top_n', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.sparse_embedding': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.stream_completion': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.text_embedding': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'input_type', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.update': { + path: [ + 'inference_id', + 'task_type' + ], + body: [ + 'inference_config' + ], + query: [] + } + } + } + + /** + * Perform chat completion inference on the service. The chat completion inference API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. It only works with the `chat_completion` task type for `openai` and `elastic` inference services. NOTE: The `chat_completion` task type is only available within the _stream API and only supports streaming. The Chat completion inference API and the Stream inference API differ in their response structure and capabilities. The Chat completion inference API provides more comprehensive customization options through more fields and function calling support. If you use the `openai`, `hugging_face` or the `elastic` service, use the Chat completion inference API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference | Elasticsearch API documentation} + */ + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.chat_completion_unified'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/chat_completion/${encodeURIComponent(params.inference_id.toString())}/_stream` + const meta: TransportRequestMetadata = { + name: 'inference.chat_completion_unified', + pathParts: { + inference_id: params.inference_id + }, + acceptedParams: [ + 'inference_id', + 'chat_completion_request', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform completion inference on the service. Get responses for completion tasks. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptions): Promise + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.completion'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/completion/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.completion', + pathParts: { + inference_id: params.inference_id + }, + acceptedParams: [ + 'inference_id', + 'input', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete an inference endpoint. This API requires the `manage_inference` cluster privilege (the built-in `inference_admin` role grants this privilege). 
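+ * A minimal usage sketch; the `client` instance and the endpoint ID below are hypothetical placeholders, not values from this codebase.
+ * @example
+ * // Delete an endpoint by task type and ID; `force` removes it even if it is still referenced.
+ * const response = await client.inference.delete({
+ *   task_type: 'sparse_embedding',
+ *   inference_id: 'my-elser-endpoint', // hypothetical endpoint ID
+ *   force: true
+ * })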
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete | Elasticsearch API documentation} + */ + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['inference.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'DELETE' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'DELETE' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } + const meta: TransportRequestMetadata = { + name: 'inference.delete', + pathParts: { + task_type: params.task_type, + inference_id: params.inference_id + }, + acceptedParams: [ + 'task_type', + 'inference_id', + 'dry_run', + 'force' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get an inference endpoint. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get | Elasticsearch API documentation} + */ + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['inference.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'GET' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` + } else if (params.inference_id != null) { + method = 'GET' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'GET' + path = '/_inference' + } + const meta: TransportRequestMetadata = { + name: 'inference.get', + pathParts: { + task_type: params.task_type, + inference_id: params.inference_id + }, + acceptedParams: [ + 'task_type', + 'inference_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise + async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.inference'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'POST' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'POST' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } + const meta: TransportRequestMetadata = { + name: 'inference.inference', + pathParts: { + task_type: params.task_type, + inference_id: params.inference_id + }, + acceptedParams: [ + 'task_type', + 'inference_id', + 'query', + 'input', + 'input_type', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an inference endpoint. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. The following integrations are available through the inference API. You can find the available task types next to the integration name: * AI21 (`chat_completion`, `completion`) * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Amazon Bedrock (`completion`, `text_embedding`) * Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`) * Anthropic (`completion`) * Azure AI Studio (`completion`, `rerank`, `text_embedding`) * Azure OpenAI (`completion`, `text_embedding`) * Cohere (`completion`, `rerank`, `text_embedding`) * DeepSeek (`chat_completion`, `completion`) * Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * ELSER (`sparse_embedding`) * Google AI Studio (`completion`, `text_embedding`) * Google Vertex AI (`chat_completion`, `completion`, `rerank`, `text_embedding`) * Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`) * JinaAI (`rerank`, `text_embedding`) * Llama (`chat_completion`, `completion`, `text_embedding`) * Mistral (`chat_completion`, `completion`, `text_embedding`) * OpenAI (`chat_completion`, `completion`, `text_embedding`) * VoyageAI (`rerank`, `text_embedding`) * Watsonx inference integration (`text_embedding`) + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} + */ + async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise + async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'PUT' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}` + } else { + method = 'PUT' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}` + } + const meta: TransportRequestMetadata = { + name: 'inference.put', + pathParts: { + task_type: params.task_type, + inference_id: params.inference_id + }, + acceptedParams: [ + 'task_type', + 'inference_id', + 'inference_config', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an AI21 inference endpoint. Create an inference endpoint to perform an inference task with the `ai21` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-ai21 | Elasticsearch API documentation} + */ + async putAi21 (this: That, params: T.InferencePutAi21Request, options?: TransportRequestOptionsWithOutMeta): Promise + async putAi21 (this: That, params: T.InferencePutAi21Request, options?: TransportRequestOptionsWithMeta): Promise> + async putAi21 (this: That, params: T.InferencePutAi21Request, options?: TransportRequestOptions): Promise + async putAi21 (this: That, params: T.InferencePutAi21Request, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_ai21'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.ai21_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_ai21', + pathParts: { + task_type: params.task_type, + ai21_inference_id: params.ai21_inference_id + }, + acceptedParams: [ + 'task_type', + 'ai21_inference_id', + 'service', + 'service_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. 
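+ * A hedged usage sketch: the endpoint ID and every `service_settings` value below are hypothetical placeholders, not values from this codebase.
+ * @example
+ * const response = await client.inference.putAlibabacloud({
+ *   task_type: 'completion',
+ *   alibabacloud_inference_id: 'my-alibabacloud-endpoint', // hypothetical ID
+ *   service: 'alibabacloud-ai-search',
+ *   service_settings: {
+ *     api_key: '<api key>',
+ *     service_id: 'ops-qwen-turbo',
+ *     host: 'default-j01.platform-cn-shanghai.opensearch.aliyuncs.com',
+ *     workspace: 'default'
+ *   }
+ * })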
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation} + */ + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_alibabacloud'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.alibabacloud_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_alibabacloud', + pathParts: { + task_type: params.task_type, + alibabacloud_inference_id: params.alibabacloud_inference_id + }, + acceptedParams: [ + 'task_type', + 'alibabacloud_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Amazon Bedrock inference endpoint. Create an inference endpoint to perform an inference task with the `amazonbedrock` service. > info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. 
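+ * A minimal sketch; the endpoint ID, credentials, region, and model below are hypothetical placeholders, not values from this codebase.
+ * @example
+ * const response = await client.inference.putAmazonbedrock({
+ *   task_type: 'text_embedding',
+ *   amazonbedrock_inference_id: 'my-bedrock-endpoint', // hypothetical ID
+ *   service: 'amazonbedrock',
+ *   service_settings: {
+ *     access_key: '<aws access key>',
+ *     secret_key: '<aws secret key>',
+ *     region: 'us-east-1',
+ *     provider: 'amazontitan',
+ *     model: 'amazon.titan-embed-text-v2:0'
+ *   }
+ * })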
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation} + */ + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_amazonbedrock'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.amazonbedrock_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_amazonbedrock', + pathParts: { + task_type: params.task_type, + amazonbedrock_inference_id: params.amazonbedrock_inference_id + }, + acceptedParams: [ + 'task_type', + 'amazonbedrock_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Amazon SageMaker inference endpoint. Create an inference endpoint to perform an inference task with the `amazon_sagemaker` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker | Elasticsearch API documentation} + */ + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptions): Promise + async putAmazonsagemaker (this: That, params: T.InferencePutAmazonsagemakerRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_amazonsagemaker'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.amazonsagemaker_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_amazonsagemaker', + pathParts: { + task_type: params.task_type, + amazonsagemaker_inference_id: params.amazonsagemaker_inference_id + }, + acceptedParams: [ + 'task_type', + 'amazonsagemaker_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic | Elasticsearch API documentation} + */ + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_anthropic'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.anthropic_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_anthropic', + pathParts: { + task_type: params.task_type, + anthropic_inference_id: params.anthropic_inference_id + }, + acceptedParams: [ + 'task_type', + 'anthropic_inference_id', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Azure AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureaistudio | Elasticsearch API documentation} + */ + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_azureaistudio'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.azureaistudio_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_azureaistudio', + pathParts: { + task_type: params.task_type, + azureaistudio_inference_id: params.azureaistudio_inference_id + }, + acceptedParams: [ + 'task_type', + 'azureaistudio_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. 
The list of chat completion models that you can choose from in your Azure OpenAI deployment includes: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-azureopenai | Elasticsearch API documentation} + */ + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_azureopenai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.azureopenai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_azureopenai', + pathParts: { + task_type: params.task_type, + azureopenai_inference_id: params.azureopenai_inference_id + }, + acceptedParams: [ + 'task_type', + 'azureopenai_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. 
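+ * A minimal sketch; the endpoint ID, API key, and model below are hypothetical placeholders, not values from this codebase.
+ * @example
+ * const response = await client.inference.putCohere({
+ *   task_type: 'text_embedding',
+ *   cohere_inference_id: 'my-cohere-endpoint', // hypothetical ID
+ *   service: 'cohere',
+ *   service_settings: {
+ *     api_key: '<api key>',
+ *     model_id: 'embed-english-v3.0'
+ *   }
+ * })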
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-cohere | Elasticsearch API documentation} + */ + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptions): Promise + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_cohere'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.cohere_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_cohere', + pathParts: { + task_type: params.task_type, + cohere_inference_id: params.cohere_inference_id + }, + acceptedParams: [ + 'task_type', + 'cohere_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Contextual AI inference endpoint. Create an inference endpoint to perform an inference task with the `contextualai` service. To review the available `rerank` models, refer to the Contextual AI documentation. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-contextualai | Elasticsearch API documentation} + */ + async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptions): Promise + async putContextualai (this: That, params: T.InferencePutContextualaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_contextualai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.contextualai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_contextualai', + pathParts: { + task_type: params.task_type, + contextualai_inference_id: params.contextualai_inference_id + }, + acceptedParams: [ + 'task_type', + 'contextualai_inference_id', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a custom inference endpoint. The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key. Templates are portions of a string that start with `${` and end with `}`. The parameters `secret_parameters` and `task_settings` are checked for keys for template replacement. Template replacement is supported in the `request`, `headers`, `url`, and `query_parameters`. If the definition (key) is not found for a template, an error message is returned. In case of an endpoint definition like the following: ``` PUT _inference/text_embedding/test-text-embedding { "service": "custom", "service_settings": { "secret_parameters": { "api_key": "" }, "url": "...endpoints.huggingface.cloud/v1/embeddings", "headers": { "Authorization": "Bearer ${api_key}", "Content-Type": "application/json" }, "request": "{\"input\": ${input}}", "response": { "json_parser": { "text_embeddings":"$.data[*].embedding[*]" } } } } ``` To replace `${api_key}`, the `secret_parameters` and `task_settings` are checked for a key named `api_key`. > info > Templates should not be surrounded by quotes. Pre-defined templates: * `${input}` refers to the array of input strings that comes from the `input` field of the subsequent inference requests. * `${input_type}` refers to the input type translation values. * `${query}` refers to the query field used specifically for reranking tasks. * `${top_n}` refers to the `top_n` field available when performing rerank requests. * `${return_documents}` refers to the `return_documents` field available when performing rerank requests. 
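+ * The sketch below restates the endpoint definition from the description above as a client call; the URL and API key placeholders are copied verbatim from that example.
+ * @example
+ * const response = await client.inference.putCustom({
+ *   task_type: 'text_embedding',
+ *   custom_inference_id: 'test-text-embedding',
+ *   service: 'custom',
+ *   service_settings: {
+ *     secret_parameters: { api_key: '<api key>' },
+ *     url: '...endpoints.huggingface.cloud/v1/embeddings',
+ *     headers: {
+ *       Authorization: 'Bearer ${api_key}', // template, resolved from secret_parameters
+ *       'Content-Type': 'application/json'
+ *     },
+ *     request: '{"input": ${input}}',
+ *     response: {
+ *       json_parser: { text_embeddings: '$.data[*].embedding[*]' }
+ *     }
+ *   }
+ * })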
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-custom | Elasticsearch API documentation} + */ + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptions): Promise + async putCustom (this: That, params: T.InferencePutCustomRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_custom'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.custom_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_custom', + pathParts: { + task_type: params.task_type, + custom_inference_id: params.custom_inference_id + }, + acceptedParams: [ + 'task_type', + 'custom_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a DeepSeek inference endpoint. Create an inference endpoint to perform an inference task with the `deepseek` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-deepseek | Elasticsearch API documentation} + */ + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise + async putDeepseek (this: That, params: T.InferencePutDeepseekRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_deepseek'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.deepseek_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_deepseek', + pathParts: { + task_type: params.task_type, + deepseek_inference_id: params.deepseek_inference_id + }, + acceptedParams: [ + 'task_type', + 'deepseek_inference_id', + 'service', + 'service_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create the endpoints using the API if you want to customize the settings. If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elasticsearch | Elasticsearch API documentation} + */ + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_elasticsearch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.elasticsearch_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_elasticsearch', + pathParts: { + task_type: params.task_type, + elasticsearch_inference_id: params.elasticsearch_inference_id + }, + acceptedParams: [ + 'task_type', + 'elasticsearch_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an ELSER inference endpoint. Create an inference endpoint to perform an inference task with the `elser` service. You can also deploy ELSER by using the Elasticsearch inference integration. > info > Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings. The API request will automatically download and deploy the ELSER model if it isn't already downloaded. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-elser | Elasticsearch API documentation} + */ + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptions): Promise + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_elser'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.elser_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_elser', + pathParts: { + task_type: params.task_type, + elser_inference_id: params.elser_inference_id + }, + acceptedParams: [ + 'task_type', + 'elser_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googleaistudio | Elasticsearch API documentation} + */ + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_googleaistudio'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.googleaistudio_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_googleaistudio', + pathParts: { + task_type: params.task_type, + googleaistudio_inference_id: params.googleaistudio_inference_id + }, + acceptedParams: [ + 'task_type', + 'googleaistudio_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. 
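+ * A minimal sketch; the endpoint ID, service account JSON, model, location, and project below are hypothetical placeholders, not values from this codebase.
+ * @example
+ * const response = await client.inference.putGooglevertexai({
+ *   task_type: 'text_embedding',
+ *   googlevertexai_inference_id: 'my-vertexai-endpoint', // hypothetical ID
+ *   service: 'googlevertexai',
+ *   service_settings: {
+ *     service_account_json: '<service account json>',
+ *     model_id: 'text-embedding-005',
+ *     location: 'us-central1',
+ *     project_id: '<project id>'
+ *   }
+ * })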
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-googlevertexai | Elasticsearch API documentation} + */ + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_googlevertexai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.googlevertexai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_googlevertexai', + pathParts: { + task_type: params.task_type, + googlevertexai_inference_id: params.googlevertexai_inference_id + }, + acceptedParams: [ + 'task_type', + 'googlevertexai_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. Supported tasks include: `text_embedding`, `completion`, and `chat_completion`. To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint. Select a model that supports the task you intend to use. For Elastic's `text_embedding` task: The selected model must support the `Sentence Embeddings` task. On the new endpoint creation page, select the `Sentence Embeddings` task under the `Advanced Configuration` section. After the endpoint has initialized, copy the generated endpoint URL. Recommended models for `text_embedding` task: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` For Elastic's `chat_completion` and `completion` tasks: The selected model must support the `Text Generation` task and expose the OpenAI API. HuggingFace supports both serverless and dedicated endpoints for `Text Generation`. When creating a dedicated endpoint, select the `Text Generation` task. 
After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and includes the `/v1/chat/completions` path in the URL. Then, copy the full endpoint URL for use. Recommended models for `chat_completion` and `completion` tasks: * `Mistral-7B-Instruct-v0.2` * `QwQ-32B` * `Phi-3-mini-128k-instruct` For Elastic's `rerank` task: The selected model must support the `sentence-ranking` task and expose the OpenAI API. HuggingFace supports only dedicated (not serverless) endpoints for `Rerank` so far. After the endpoint is initialized, copy the full endpoint URL for use. Tested models for `rerank` task: * `bge-reranker-base` * `jina-reranker-v1-turbo-en-GGUF` + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-hugging-face | Elasticsearch API documentation} + */ + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_hugging_face'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.huggingface_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_hugging_face', + pathParts: { + task_type: params.task_type, + huggingface_inference_id: params.huggingface_inference_id + }, + acceptedParams: [ + 'task_type', + 'huggingface_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` and `text_embedding` models, refer to the JinaAI documentation.
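+ *
+ * A minimal sketch, illustrative only: `client` is assumed to be configured, and the endpoint ID, API key, and model shown are placeholders.
+ *
+ * ```ts
+ * const response = await client.inference.putJinaai({
+ *   task_type: 'text_embedding',
+ *   jinaai_inference_id: 'my-jinaai-endpoint', // hypothetical ID
+ *   service: 'jinaai',
+ *   service_settings: {
+ *     api_key: '<JinaAI API key>',   // placeholder credential
+ *     model_id: 'jina-embeddings-v3' // assumption: any available JinaAI embedding model
+ *   }
+ * })
+ * ```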
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-jinaai | Elasticsearch API documentation} + */ + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_jinaai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.jinaai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_jinaai', + pathParts: { + task_type: params.task_type, + jinaai_inference_id: params.jinaai_inference_id + }, + acceptedParams: [ + 'task_type', + 'jinaai_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Llama inference endpoint. Create an inference endpoint to perform an inference task with the `llama` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-llama | Elasticsearch API documentation} + */ + async putLlama (this: That, params: T.InferencePutLlamaRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putLlama (this: That, params: T.InferencePutLlamaRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putLlama (this: That, params: T.InferencePutLlamaRequest, options?: TransportRequestOptions): Promise + async putLlama (this: That, params: T.InferencePutLlamaRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_llama'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.llama_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_llama', + pathParts: { + task_type: params.task_type, + llama_inference_id: params.llama_inference_id + }, + acceptedParams: [ + 'task_type', + 'llama_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Mistral inference endpoint. Create an inference endpoint to perform an inference task with the `mistral` service. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-mistral | Elasticsearch API documentation} + */ + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptions): Promise + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_mistral'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.mistral_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_mistral', + pathParts: { + task_type: params.task_type, + mistral_inference_id: params.mistral_inference_id + }, + acceptedParams: [ + 'task_type', + 'mistral_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service or `openai` compatible APIs. 
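+ *
+ * For illustration (not emitted by the generator), a sketch that assumes a configured `client`; the endpoint ID, key, and model are placeholders:
+ *
+ * ```ts
+ * const response = await client.inference.putOpenai({
+ *   task_type: 'completion',
+ *   openai_inference_id: 'my-openai-endpoint', // hypothetical ID
+ *   service: 'openai',
+ *   service_settings: {
+ *     api_key: '<OpenAI API key>', // placeholder credential
+ *     model_id: 'gpt-4o'           // assumption: any model that supports the chosen task type
+ *   }
+ * })
+ * ```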
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-openai | Elasticsearch API documentation} + */ + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_openai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.openai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_openai', + pathParts: { + task_type: params.task_type, + openai_inference_id: params.openai_inference_id + }, + acceptedParams: [ + 'task_type', + 'openai_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a VoyageAI inference endpoint. Create an inference endpoint to perform an inference task with the `voyageai` service. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-voyageai | Elasticsearch API documentation} + */ + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_voyageai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.voyageai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_voyageai', + pathParts: { + task_type: params.task_type, + voyageai_inference_id: params.voyageai_inference_id + }, + acceptedParams: [ + 'task_type', + 'voyageai_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx | Elasticsearch API documentation} + */ + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.put_watsonx'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.watsonx_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_watsonx', + pathParts: { + task_type: params.task_type, + watsonx_inference_id: params.watsonx_inference_id + }, + acceptedParams: [ + 'task_type', + 'watsonx_inference_id', + 'chunking_settings', + 'service', + 'service_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform reranking inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptionsWithMeta): Promise> + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptions): Promise + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.rerank'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/rerank/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.rerank', + pathParts: { + inference_id: params.inference_id + }, + acceptedParams: [ + 'inference_id', + 'query', + 'input', + 'return_documents', + 'top_n', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform sparse embedding inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.sparse_embedding'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/sparse_embedding/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.sparse_embedding', + pathParts: { + inference_id: params.inference_id + }, + acceptedParams: [ + 'inference_id', + 'input', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform streaming completion inference on the service Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). 
You must use a client that supports streaming. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference | Elasticsearch API documentation} + */ + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.stream_completion'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/completion/${encodeURIComponent(params.inference_id.toString())}/_stream` + const meta: TransportRequestMetadata = { + name: 'inference.stream_completion', + pathParts: { + inference_id: params.inference_id + }, + acceptedParams: [ + 'inference_id', + 'input', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform text embedding inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.text_embedding'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/text_embedding/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.text_embedding', + pathParts: { + inference_id: params.inference_id + }, + acceptedParams: [ + 'inference_id', + 'input', + 'input_type', + 'task_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update | Elasticsearch API documentation} + */ + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['inference.update'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'PUT' + path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_update` + } else { + method = 'PUT' + path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_update` + } + const meta: TransportRequestMetadata = { + name: 'inference.update', + pathParts: { + inference_id: params.inference_id, + task_type: params.task_type + }, + acceptedParams: [ + 'inference_id', + 'task_type', + 'inference_config' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/info.ts b/src/api/api/info.ts new file mode 100644 index 000000000..225d118f6 --- /dev/null +++ b/src/api/api/info.ts @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + info: { + path: [], + body: [], + query: [] + } +} + +/** + * Get cluster info. Get basic build, version, and cluster information. NOTE: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info | Elasticsearch API documentation} + */ +export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise +export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.info + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/' + const meta: TransportRequestMetadata = { + name: 'info', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts new file mode 100644 index 000000000..2214ab286 --- /dev/null +++ b/src/api/api/ingest.ts @@ -0,0 +1,828 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Ingest { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'ingest.delete_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.geo_ip_stats': { + path: [], + body: [], + query: [] + }, + 'ingest.get_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'ingest.get_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'ingest.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'summary' + ] + }, + 'ingest.processor_grok': { + path: [], + body: [], + query: [] + }, + 'ingest.put_geoip_database': { + path: [ + 'id' + ], + body: [ + 'name', + 'maxmind' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_ip_location_database': { + path: [ + 'id' + ], + body: [ + 'configuration' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_pipeline': { + path: [ + 'id' + ], + body: [ + '_meta', + 'description', + 'on_failure', + 'processors', + 'version', + 'deprecated', + 'field_access_pattern' + ], + query: [ + 'master_timeout', + 'timeout', + 'if_version' + ] + }, + 'ingest.simulate': { + path: [ + 'id' + ], + body: [ + 'docs', + 'pipeline' + ], + query: [ + 'verbose' + ] + } + } + } + + /** + * Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. 
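+ *
+ * Illustrative usage (assumes a configured `client`; the configuration ID is hypothetical):
+ *
+ * ```ts
+ * await client.ingest.deleteGeoipDatabase({
+ *   id: 'my-geoip-config', // the configuration to delete; the API accepts one or more, per the description above
+ *   timeout: '30s'         // optional, as listed in the accepted query parameters
+ * })
+ * ```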
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database | Elasticsearch API documentation} + */ + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise + async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ingest.delete_geoip_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ingest/geoip/database/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.delete_geoip_database', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete IP geolocation database configurations. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database | Elasticsearch API documentation} + */ + async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise + async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ingest.delete_ip_location_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ingest/ip_location/database/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.delete_ip_location_database', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete pipelines. 
Delete one or more ingest pipelines. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline | Elasticsearch API documentation} + */ + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise + async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ingest.delete_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.delete_pipeline', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. + * @see {@link https://www.elastic.co/docs/reference/enrich-processor/geoip-processor | Elasticsearch API documentation} + */ + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise + async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ingest.geo_ip_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_ingest/geoip/stats' + const meta: TransportRequestMetadata = { + name: 'ingest.geo_ip_stats', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. 
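+ *
+ * A short sketch (assumes a configured `client`): omitting `id` takes the no-ID route in the method body below and returns all configurations.
+ *
+ * ```ts
+ * const all = await client.ingest.getGeoipDatabase()
+ * const one = await client.ingest.getGeoipDatabase({ id: 'my-geoip-config' }) // hypothetical ID
+ * ```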
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database | Elasticsearch API documentation} + */ + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise + async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ingest.get_geoip_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_ingest/geoip/database/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_ingest/geoip/database' + } + const meta: TransportRequestMetadata = { + name: 'ingest.get_geoip_database', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get IP geolocation database configurations. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database | Elasticsearch API documentation} + */ + async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise + async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ingest.get_ip_location_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_ingest/ip_location/database/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_ingest/ip_location/database' + } + const meta: TransportRequestMetadata = { + name: 'ingest.get_ip_location_database', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline | Elasticsearch API documentation} + */ + async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise + async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ingest.get_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_ingest/pipeline' + } + const meta: TransportRequestMetadata = { + name: 'ingest.get_pipeline', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'master_timeout', + 'summary' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. 
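+ *
+ * Illustrative call (assumes a configured `client`); the response maps built-in grok pattern names to their regular expressions:
+ *
+ * ```ts
+ * const response = await client.ingest.processorGrok()
+ * console.log(Object.keys(response.patterns).length, 'built-in grok patterns')
+ * ```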
+ * @see {@link https://www.elastic.co/docs/reference/enrich-processor/grok-processor | Elasticsearch API documentation} + */ + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise + async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ingest.processor_grok'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_ingest/processor/grok' + const meta: TransportRequestMetadata = { + name: 'ingest.processor_grok', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database | Elasticsearch API documentation} + */ + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise + async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ingest.put_geoip_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ingest/geoip/database/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.put_geoip_database', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'name', + 'maxmind', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update an IP geolocation database configuration. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database | Elasticsearch API documentation} + */ + async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise + async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ingest.put_ip_location_database'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ingest/ip_location/database/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.put_ip_location_database', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'configuration', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a pipeline. Changes made using this API take effect immediately. + * @see {@link https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines | Elasticsearch API documentation} + */ + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise + async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ingest.put_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ingest.put_pipeline', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + '_meta', + 'description', + 'on_failure', + 'processors', + 'version', + 'deprecated', + 'field_access_pattern', + 'master_timeout', + 'timeout', + 'if_version' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Simulate a pipeline. Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate | Elasticsearch API documentation} + */ + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise + async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ingest.simulate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = body != null ? 'POST' : 'GET' + path = `/_ingest/pipeline/${encodeURIComponent(params.id.toString())}/_simulate` + } else { + method = body != null ? 
'POST' : 'GET' + path = '/_ingest/pipeline/_simulate' + } + const meta: TransportRequestMetadata = { + name: 'ingest.simulate', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'docs', + 'pipeline', + 'verbose' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts new file mode 100644 index 000000000..737050353 --- /dev/null +++ b/src/api/api/knn_search.ts @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + knn_search: { + path: [ + 'index' + ], + body: [], + query: [] + } +} + +/** + * Performs a kNN search + */ +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.knn_search + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_knn_search` + const meta: TransportRequestMetadata = { + name: 'knn_search', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/license.ts b/src/api/api/license.ts new file mode 100644 index 000000000..6b501374b --- /dev/null +++ b/src/api/api/license.ts @@ -0,0 +1,439 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. 
Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class License { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'license.delete': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'license.get': { + path: [], + body: [], + query: [ + 'accept_enterprise', + 'local' + ] + }, + 'license.get_basic_status': { + path: [], + body: [], + query: [] + }, + 'license.get_trial_status': { + path: [], + body: [], + query: [] + }, + 'license.post': { + path: [], + body: [ + 'license', + 'licenses' + ], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_basic': { + path: [], + body: [], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_trial': { + path: [], + body: [], + query: [ + 'acknowledge', + 'type', + 'master_timeout' + ] + } + } + } + + /** + * Delete the license. When the license expires, your subscription level reverts to Basic. If the operator privileges feature is enabled, only operator users can use this API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete | Elasticsearch API documentation} + */ + async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['license.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = '/_license' + const meta: TransportRequestMetadata = { + name: 'license.delete', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. >info > If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. > If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. 
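+ * @example
+ * // Illustrative usage sketch, not generated output: fetch the current license
+ * // and log its type and status. The node URL and API key are placeholders;
+ * // `resp.license` follows the documented license response shape.
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: 'REDACTED' } })
+ * const resp = await client.license.get()
+ * console.log(resp.license.type, resp.license.status)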
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get | Elasticsearch API documentation} + */ + async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['license.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_license' + const meta: TransportRequestMetadata = { + name: 'license.get', + acceptedParams: [ + 'accept_enterprise', + 'local' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the basic license status. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status | Elasticsearch API documentation} + */ + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise + async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['license.get_basic_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_license/basic_status' + const meta: TransportRequestMetadata = { + name: 'license.get_basic_status', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the trial status. 
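+ * @example
+ * // Illustrative sketch: check trial eligibility before attempting to start
+ * // one. Assumes `client` is an already-configured @elastic/elasticsearch
+ * // Client; the response carries an `eligible_to_start_trial` flag.
+ * const status = await client.license.getTrialStatus()
+ * if (status.eligible_to_start_trial) console.log('A trial can still be activated')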
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status | Elasticsearch API documentation} + */ + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise + async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['license.get_trial_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_license/trial_status' + const meta: TransportRequestMetadata = { + name: 'license.get_trial_status', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post | Elasticsearch API documentation} + */ + async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise + async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['license.post'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = '/_license' + const meta: TransportRequestMetadata = { + name: 'license.post', + acceptedParams: [ + 'license', + 'licenses', + 'acknowledge', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start a basic license. Start an indefinite basic license, which gives access to all the basic features. NOTE: In order to start a basic license, you must not currently have a basic license. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. To check the status of your basic license, use the get basic license API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic | Elasticsearch API documentation} + */ + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise + async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['license.post_start_basic'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_license/start_basic' + const meta: TransportRequestMetadata = { + name: 'license.post_start_basic', + acceptedParams: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start a trial. Start a 30-day trial, which gives access to all subscription features. NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API. 
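+ * @example
+ * // Illustrative sketch: start a 30-day trial, acknowledging the license
+ * // change up front so the request cannot fail on the acknowledge check.
+ * // Assumes a configured `client` instance.
+ * const resp = await client.license.postStartTrial({ acknowledge: true })
+ * console.log(resp.trial_was_started)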
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial | Elasticsearch API documentation} + */ + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise + async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['license.post_start_trial'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_license/start_trial' + const meta: TransportRequestMetadata = { + name: 'license.post_start_trial', + acceptedParams: [ + 'acknowledge', + 'type', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts new file mode 100644 index 000000000..4832165c8 --- /dev/null +++ b/src/api/api/logstash.ts @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Logstash { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'logstash.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.put_pipeline': { + path: [ + 'id' + ], + body: [ + 'pipeline' + ], + query: [] + } + } + } + + /** + * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code. 
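+ * @example
+ * // Illustrative sketch with a hypothetical pipeline ID; assumes a configured
+ * // `client`. A successful call resolves with an empty response body.
+ * await client.logstash.deletePipeline({ id: 'my-pipeline' })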
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline | Elasticsearch API documentation} + */ + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise + async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['logstash.delete_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'logstash.delete_pipeline', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline | Elasticsearch API documentation} + */ + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise + async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['logstash.get_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_logstash/pipeline' + } + const meta: TransportRequestMetadata = { + name: 'logstash.get_pipeline', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a Logstash pipeline. Create a pipeline that is used for Logstash Central Management. 
If the specified pipeline exists, it is replaced. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline | Elasticsearch API documentation} + */ + async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise + async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['logstash.put_pipeline'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_logstash/pipeline/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'logstash.put_pipeline', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'pipeline' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts new file mode 100644 index 000000000..3857df0f9 --- /dev/null +++ b/src/api/api/mget.ts @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + mget: { + path: [ + 'index' + ], + body: [ + 'docs', + 'ids' + ], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields' + ] + } +} + +/** + * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. **Filter source fields** By default, the `_source` field is returned for every document (if stored). 
Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. **Get stored fields** Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget | Elasticsearch API documentation} + */ +export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise> +export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.mget + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_mget` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_mget' + } + const meta: TransportRequestMetadata = { + name: 'mget', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'docs', + 'ids', + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts new file mode 100644 index 000000000..dd61c399d --- /dev/null +++ b/src/api/api/migration.ts @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. 
Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +export default class Migration { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'migration.deprecations': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'migration.get_feature_upgrade_status': { + path: [], + body: [], + query: [] + }, + 'migration.post_feature_upgrade': { + path: [], + body: [], + query: [] + } + } + } + + /** + * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations | Elasticsearch API documentation} + */ + async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise + async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['migration.deprecations'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_migration/deprecations` + } else { + method = 'GET' + path = '/_migration/deprecations' + } + const meta: TransportRequestMetadata = { + name: 'migration.deprecations', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. 
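+ * @example
+ * // Illustrative sketch: list system features and whether they still need
+ * // migrating before a major-version upgrade. Assumes a configured `client`.
+ * const status = await client.migration.getFeatureUpgradeStatus()
+ * console.log(status.migration_status)
+ * for (const feature of status.features) console.log(feature.feature_name, feature.migration_status)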
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status | Elasticsearch API documentation} + */ + async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise + async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['migration.get_feature_upgrade_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_migration/system_features' + const meta: TransportRequestMetadata = { + name: 'migration.get_feature_upgrade_status', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. Some functionality might be temporarily unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status | Elasticsearch API documentation} + */ + async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise + async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['migration.post_feature_upgrade'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_migration/system_features' + const meta: TransportRequestMetadata = { + name: 'migration.post_feature_upgrade', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts new file mode 100644 index 000000000..a7c86168c --- /dev/null +++ b/src/api/api/ml.ts @@ -0,0 +1,5435 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Ml { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'ml.clear_trained_model_deployment_cache': { + path: [ + 'model_id' + ], + body: [], + query: [] + }, + 'ml.close_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'force', + 'timeout' + ], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.delete_calendar': { + path: [ + 'calendar_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_event': { + path: [ + 'calendar_id', + 'event_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.delete_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_datafeed': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'force' + ] + }, + 'ml.delete_expired_data': { + path: [ + 'job_id' + ], + body: [ + 'requests_per_second', + 'timeout' + ], + query: [ + 'requests_per_second', + 'timeout' + ] + }, + 'ml.delete_filter': { + path: [ + 'filter_id' + ], + body: [], + query: [] + }, + 'ml.delete_forecast': { + path: [ + 'job_id', + 'forecast_id' + ], + body: [], + query: [ + 'allow_no_forecasts', + 'timeout' + ] + }, + 'ml.delete_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'force', + 'delete_user_annotations', + 'wait_for_completion' + ] + }, + 'ml.delete_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [] + }, + 'ml.delete_trained_model': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [] + }, + 'ml.estimate_model_memory': { + path: [], + body: [ + 'analysis_config', + 'max_bucket_cardinality', + 'overall_cardinality' + ], + 
query: [] + }, + 'ml.evaluate_data_frame': { + path: [], + body: [ + 'evaluation', + 'index', + 'query' + ], + query: [] + }, + 'ml.explain_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'source', + 'dest', + 'analysis', + 'description', + 'model_memory_limit', + 'max_num_threads', + 'analyzed_fields', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.flush_job': { + path: [ + 'job_id' + ], + body: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ], + query: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ] + }, + 'ml.forecast': { + path: [ + 'job_id' + ], + body: [ + 'duration', + 'expires_in', + 'max_model_memory' + ], + query: [ + 'duration', + 'expires_in', + 'max_model_memory' + ] + }, + 'ml.get_buckets': { + path: [ + 'job_id', + 'timestamp' + ], + body: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'page', + 'sort', + 'start' + ], + query: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_calendar_events': { + path: [ + 'calendar_id' + ], + body: [], + query: [ + 'end', + 'from', + 'job_id', + 'size', + 'start' + ] + }, + 'ml.get_calendars': { + path: [ + 'calendar_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_categories': { + path: [ + 'job_id', + 'category_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'partition_field_value', + 'size' + ] + }, + 'ml.get_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + }, + 'ml.get_data_frame_analytics_stats': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'verbose' + ] + }, + 'ml.get_datafeed_stats': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_filters': { + path: [ + 'filter_id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_influencers': { + path: [ + 'job_id' + ], + body: [ + 'page' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'influencer_score', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_job_stats': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_memory_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ml.get_model_snapshot_upgrade_stats': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_model_snapshots': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'desc', + 'end', + 'page', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_overall_buckets': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ], + query: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ] + }, + 'ml.get_records': { + path: [ + 'job_id' + ], + body: [ + 'desc', + 'end', + 'exclude_interim', + 'page', + 'record_score', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'from', + 
'record_score', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'decompress_definition', + 'exclude_generated', + 'from', + 'include', + 'size', + 'tags' + ] + }, + 'ml.get_trained_models_stats': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size' + ] + }, + 'ml.infer_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'docs', + 'inference_config' + ], + query: [ + 'timeout' + ] + }, + 'ml.info': { + path: [], + body: [], + query: [] + }, + 'ml.open_job': { + path: [ + 'job_id' + ], + body: [ + 'timeout' + ], + query: [ + 'timeout' + ] + }, + 'ml.post_calendar_events': { + path: [ + 'calendar_id' + ], + body: [ + 'events' + ], + query: [] + }, + 'ml.post_data': { + path: [ + 'job_id' + ], + body: [ + 'data' + ], + query: [ + 'reset_end', + 'reset_start' + ] + }, + 'ml.preview_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'config' + ], + query: [] + }, + 'ml.preview_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'datafeed_config', + 'job_config' + ], + query: [ + 'start', + 'end' + ] + }, + 'ml.put_calendar': { + path: [ + 'calendar_id' + ], + body: [ + 'job_ids', + 'description' + ], + query: [] + }, + 'ml.put_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.put_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'allow_lazy_start', + 'analysis', + 'analyzed_fields', + 'description', + 'dest', + 'max_num_threads', + '_meta', + 'model_memory_limit', + 'source', + 'headers', + 'version' + ], + query: [] + }, + 'ml.put_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'aggs', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size', + 'headers' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_filter': { + path: [ + 'filter_id' + ], + body: [ + 'description', + 'items' + ], + query: [] + }, + 'ml.put_job': { + path: [], + body: [ + 'allow_lazy_open', + 'analysis_config', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'daily_model_snapshot_retention_after_days', + 'data_description', + 'datafeed_config', + 'description', + 'job_id', + 'groups', + 'model_plot_config', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_index_name', + 'results_retention_days' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'compressed_definition', + 'definition', + 'description', + 'inference_config', + 'input', + 'metadata', + 'model_type', + 'model_size_bytes', + 'platform_architecture', + 'tags', + 'prefix_strings' + ], + query: [ + 'defer_definition_decompression', + 'wait_for_completion' + ] + }, + 'ml.put_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [ + 'reassign' + ] + }, + 'ml.put_trained_model_definition_part': { + path: [ + 'model_id', + 'part' + ], + body: [ + 'definition', + 'total_definition_length', + 'total_parts' + ], + query: [] + }, + 'ml.put_trained_model_vocabulary': { + path: [ + 'model_id' + ], + body: [ + 'vocabulary', + 'merges', + 'scores' + ], + query: [] + }, + 
'ml.reset_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'delete_user_annotations' + ] + }, + 'ml.revert_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'delete_intervening_results' + ], + query: [ + 'delete_intervening_results' + ] + }, + 'ml.set_upgrade_mode': { + path: [], + body: [], + query: [ + 'enabled', + 'timeout' + ] + }, + 'ml.start_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'ml.start_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'end', + 'start', + 'timeout' + ], + query: [ + 'end', + 'start', + 'timeout' + ] + }, + 'ml.start_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'adaptive_allocations' + ], + query: [ + 'cache_size', + 'deployment_id', + 'number_of_allocations', + 'priority', + 'queue_capacity', + 'threads_per_allocation', + 'timeout', + 'wait_for' + ] + }, + 'ml.stop_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.stop_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'allow_no_match', + 'force', + 'timeout' + ], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.stop_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'force' + ] + }, + 'ml.update_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'description', + 'model_memory_limit', + 'max_num_threads', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.update_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.update_filter': { + path: [ + 'filter_id' + ], + body: [ + 'add_items', + 'description', + 'remove_items' + ], + query: [] + }, + 'ml.update_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_lazy_open', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'categorization_filters', + 'description', + 'model_plot_config', + 'model_prune_window', + 'daily_model_snapshot_retention_after_days', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_retention_days', + 'groups', + 'detectors', + 'per_partition_categorization' + ], + query: [] + }, + 'ml.update_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'description', + 'retain' + ], + query: [] + }, + 'ml.update_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'number_of_allocations', + 'adaptive_allocations' + ], + query: [ + 'number_of_allocations' + ] + }, + 'ml.upgrade_job_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'timeout' + ] + }, + 'ml.validate': { + path: [], + body: [ + 'job_id', + 'analysis_config', + 'analysis_limits', + 'data_description', + 'description', + 'model_plot', + 'model_snapshot_id', + 'model_snapshot_retention_days', + 'results_index_name' + ], + query: [] + }, + 'ml.validate_detector': { + path: [], + body: [ + 'detector' + ], + query: [] + } + } + } + + /** + * Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. 
A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache | Elasticsearch API documentation} + */ + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise + async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.clear_trained_model_deployment_cache'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/cache/_clear` + const meta: TransportRequestMetadata = { + name: 'ml.clear_trained_model_deployment_cache', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. 
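+ * @example
+ * // Illustrative sketch with a hypothetical job ID; assumes a configured
+ * // `client`. The bounded timeout keeps the call from blocking indefinitely.
+ * const resp = await client.ml.closeJob({ job_id: 'total-requests', timeout: '5m' })
+ * console.log(resp.closed)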
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job | Elasticsearch API documentation} + */ + async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise + async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.close_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_close` + const meta: TransportRequestMetadata = { + name: 'ml.close_job', + pathParts: { + job_id: params.job_id + }, + acceptedParams: [ + 'job_id', + 'allow_no_match', + 'force', + 'timeout', + 'allow_no_match', + 'force', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete a calendar. Remove all scheduled events from a calendar, then delete it. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar | Elasticsearch API documentation} + */ + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise + async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.delete_calendar'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.delete_calendar', + pathParts: { + calendar_id: params.calendar_id + }, + acceptedParams: [ + 'calendar_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete events from a calendar. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event | Elasticsearch API documentation} + */ + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise + async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.delete_calendar_event'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/events/${encodeURIComponent(params.event_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.delete_calendar_event', + pathParts: { + calendar_id: params.calendar_id, + event_id: params.event_id + }, + acceptedParams: [ + 'calendar_id', + 'event_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete anomaly jobs from a calendar. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job | Elasticsearch API documentation} + */ + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise + async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.delete_calendar_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/jobs/${encodeURIComponent(params.job_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_calendar_job',
+      pathParts: {
+        calendar_id: params.calendar_id,
+        job_id: params.job_id
+      },
+      acceptedParams: [
+        'calendar_id',
+        'job_id'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Delete a data frame analytics job.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics | Elasticsearch API documentation}
+    */
+  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDataFrameAnalyticsResponse>
+  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDataFrameAnalyticsResponse, unknown>>
+  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDataFrameAnalyticsResponse>
+  async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.delete_data_frame_analytics']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_data_frame_analytics',
+      pathParts: {
+        id: params.id
+      },
+      acceptedParams: [
+        'id',
+        'force',
+        'timeout'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Delete a datafeed.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed | Elasticsearch API documentation}
+    */
+  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteDatafeedResponse>
+  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteDatafeedResponse, unknown>>
+  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<T.MlDeleteDatafeedResponse>
+  async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.delete_datafeed']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_datafeed',
+      pathParts: {
+        datafeed_id: params.datafeed_id
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'force'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Delete expired ML data. Delete all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data | Elasticsearch API documentation}
+    */
+  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteExpiredDataResponse>
+  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteExpiredDataResponse, unknown>>
+  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<T.MlDeleteExpiredDataResponse>
+  async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.delete_expired_data']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.job_id != null) {
+      method = 'DELETE'
+      path = `/_ml/_delete_expired_data/${encodeURIComponent(params.job_id.toString())}`
+    } else {
+      method = 'DELETE'
+      path = '/_ml/_delete_expired_data'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_expired_data',
+      pathParts: {
+        job_id: params.job_id
+      },
+      acceptedParams: [
+        'job_id',
+        'requests_per_second',
+        'timeout'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
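// Editor's note: a hypothetical deleteExpiredData call (not part of the patch),
// reusing the `client` from the first sketch. Omitting job_id targets all jobs;
// the throttle and timeout values are arbitrary.
await client.ml.deleteExpiredData({ requests_per_second: 100, timeout: '1h' })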
+
+  /**
+    * Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter | Elasticsearch API documentation}
+    */
+  async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteFilterResponse>
+  async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteFilterResponse, unknown>>
+  async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<T.MlDeleteFilterResponse>
+  async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.delete_filter']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_filter',
+      pathParts: {
+        filter_id: params.filter_id
+      },
+      acceptedParams: [
+        'filter_id'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast | Elasticsearch API documentation}
+    */
+  async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteForecastResponse>
+  async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteForecastResponse, unknown>>
+  async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<T.MlDeleteForecastResponse>
+  async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.delete_forecast']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.job_id != null && params.forecast_id != null) {
+      method = 'DELETE'
+      path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_forecast/${encodeURIComponent(params.forecast_id.toString())}`
+    } else {
+      method = 'DELETE'
+      path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_forecast`
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_forecast',
+      pathParts: {
+        job_id: params.job_id,
+        forecast_id: params.forecast_id
+      },
+      acceptedParams: [
+        'job_id',
+        'forecast_id',
+        'allow_no_forecasts',
+        'timeout'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma-separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job | Elasticsearch API documentation}
+    */
+  async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteJobResponse>
+  async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteJobResponse, unknown>>
+  async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<T.MlDeleteJobResponse>
+  async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.delete_job']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_job',
+      pathParts: {
+        job_id: params.job_id
+      },
+      acceptedParams: [
+        'job_id',
+        'force',
+        'delete_user_annotations',
+        'wait_for_completion'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
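// Editor's note: a hypothetical deleteJob call (not part of the patch), reusing the
// `client` from the first sketch. wait_for_completion: false returns a task id
// instead of blocking until the job is removed.
await client.ml.deleteJob({ job_id: 'my-anomaly-job', wait_for_completion: false })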
+
+  /**
+    * Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot | Elasticsearch API documentation}
+    */
+  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteModelSnapshotResponse>
+  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteModelSnapshotResponse, unknown>>
+  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<T.MlDeleteModelSnapshotResponse>
+  async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.delete_model_snapshot']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_model_snapshot',
+      pathParts: {
+        job_id: params.job_id,
+        snapshot_id: params.snapshot_id
+      },
+      acceptedParams: [
+        'job_id',
+        'snapshot_id'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model | Elasticsearch API documentation}
+    */
+  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelResponse>
+  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelResponse, unknown>>
+  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelResponse>
+  async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.delete_trained_model']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_trained_model',
+      pathParts: {
+        model_id: params.model_id
+      },
+      acceptedParams: [
+        'model_id',
+        'force',
+        'timeout'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias | Elasticsearch API documentation}
+    */
+  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlDeleteTrainedModelAliasResponse>
+  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlDeleteTrainedModelAliasResponse, unknown>>
+  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<T.MlDeleteTrainedModelAliasResponse>
+  async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.delete_trained_model_alias']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/model_aliases/${encodeURIComponent(params.model_alias.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.delete_trained_model_alias',
+      pathParts: {
+        model_alias: params.model_alias,
+        model_id: params.model_id
+      },
+      acceptedParams: [
+        'model_alias',
+        'model_id'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Estimate job model memory usage. Make an estimation of the memory usage for an anomaly detection job model. The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory | Elasticsearch API documentation}
+    */
+  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEstimateModelMemoryResponse>
+  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEstimateModelMemoryResponse, unknown>>
+  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<T.MlEstimateModelMemoryResponse>
+  async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.estimate_model_memory']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_ml/anomaly_detectors/_estimate_model_memory'
+    const meta: TransportRequestMetadata = {
+      name: 'ml.estimate_model_memory',
+      acceptedParams: [
+        'analysis_config',
+        'max_bucket_cardinality',
+        'overall_cardinality'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
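// Editor's note: an illustrative estimateModelMemory request (not part of the patch),
// reusing the `client` from the first sketch. The detector configuration and the
// cardinality estimate for the hypothetical `host` field are made-up values.
const estimate = await client.ml.estimateModelMemory({
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'bytes', by_field_name: 'host' }]
  },
  overall_cardinality: { host: 200 }
})
console.log(estimate.model_memory_estimate)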
+
+  /**
+    * Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame | Elasticsearch API documentation}
+    */
+  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlEvaluateDataFrameResponse>
+  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlEvaluateDataFrameResponse, unknown>>
+  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<T.MlEvaluateDataFrameResponse>
+  async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.evaluate_data_frame']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'POST'
+    const path = '/_ml/data_frame/_evaluate'
+    const meta: TransportRequestMetadata = {
+      name: 'ml.evaluate_data_frame',
+      acceptedParams: [
+        'evaluation',
+        'index',
+        'query'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics | Elasticsearch API documentation}
+    */
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlExplainDataFrameAnalyticsResponse>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlExplainDataFrameAnalyticsResponse, unknown>>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlExplainDataFrameAnalyticsResponse>
+  async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.explain_data_frame_analytics']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.id != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_explain`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = '/_ml/data_frame/analytics/_explain'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.explain_data_frame_analytics',
+      pathParts: {
+        id: params.id
+      },
+      acceptedParams: [
+        'id',
+        'source',
+        'dest',
+        'analysis',
+        'description',
+        'model_memory_limit',
+        'max_num_threads',
+        'analyzed_fields',
+        'allow_lazy_start'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. Both flush and close operations are similar; however, the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job | Elasticsearch API documentation}
+    */
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlFlushJobResponse>
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlFlushJobResponse, unknown>>
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise<T.MlFlushJobResponse>
+  async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.flush_job']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_flush`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.flush_job',
+      pathParts: {
+        job_id: params.job_id
+      },
+      acceptedParams: [
+        'job_id',
+        'advance_time',
+        'calc_interim',
+        'end',
+        'skip_time',
+        'start'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
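// Editor's note: a hypothetical flushJob call (not part of the patch), reusing the
// `client` from the first sketch. calc_interim requests interim results for the
// most recent buckets.
await client.ml.flushJob({ job_id: 'my-anomaly-job', calc_interim: true })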
+
+  /**
+    * Predict future behavior of a time series. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast | Elasticsearch API documentation}
+    */
+  async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlForecastResponse>
+  async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlForecastResponse, unknown>>
+  async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise<T.MlForecastResponse>
+  async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.forecast']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = 'POST'
+    const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_forecast`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.forecast',
+      pathParts: {
+        job_id: params.job_id
+      },
+      acceptedParams: [
+        'job_id',
+        'duration',
+        'expires_in',
+        'max_model_memory'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
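// Editor's note: an illustrative forecast call (not part of the patch), reusing the
// `client` from the first sketch; the duration and retention values are arbitrary.
const { forecast_id } = await client.ml.forecast({
  job_id: 'my-anomaly-job',
  duration: '3d',
  expires_in: '14d'
})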
+
+  /**
+    * Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets | Elasticsearch API documentation}
+    */
+  async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetBucketsResponse>
+  async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetBucketsResponse, unknown>>
+  async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<T.MlGetBucketsResponse>
+  async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.get_buckets']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.job_id != null && params.timestamp != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/buckets/${encodeURIComponent(params.timestamp.toString())}`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/buckets`
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_buckets',
+      pathParts: {
+        job_id: params.job_id,
+        timestamp: params.timestamp
+      },
+      acceptedParams: [
+        'job_id',
+        'timestamp',
+        'anomaly_score',
+        'desc',
+        'end',
+        'exclude_interim',
+        'expand',
+        'page',
+        'sort',
+        'start',
+        'from',
+        'size'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get info about events in calendars.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events | Elasticsearch API documentation}
+    */
+  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarEventsResponse>
+  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarEventsResponse, unknown>>
+  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarEventsResponse>
+  async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.get_calendar_events']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/events`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_calendar_events',
+      pathParts: {
+        calendar_id: params.calendar_id
+      },
+      acceptedParams: [
+        'calendar_id',
+        'end',
+        'from',
+        'job_id',
+        'size',
+        'start'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
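// Editor's note: a sketch for the getBuckets method above (not part of the patch),
// reusing the `client` from the first sketch; the score threshold and time range
// are examples only.
const buckets = await client.ml.getBuckets({
  job_id: 'my-anomaly-job',
  anomaly_score: 75,
  start: 'now-2d',
  end: 'now'
})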
+
+  /**
+    * Get calendar configuration info.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars | Elasticsearch API documentation}
+    */
+  async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCalendarsResponse>
+  async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCalendarsResponse, unknown>>
+  async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<T.MlGetCalendarsResponse>
+  async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.get_calendars']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.calendar_id != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = '/_ml/calendars'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_calendars',
+      pathParts: {
+        calendar_id: params.calendar_id
+      },
+      acceptedParams: [
+        'calendar_id',
+        'page',
+        'from',
+        'size'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
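// Editor's note: a hypothetical getCalendars call (not part of the patch), reusing
// the `client` from the first sketch. Omitting calendar_id lists every calendar.
const calendars = await client.ml.getCalendars({ size: 100 })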
+
+  /**
+    * Get anomaly detection job results for categories.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories | Elasticsearch API documentation}
+    */
+  async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetCategoriesResponse>
+  async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetCategoriesResponse, unknown>>
+  async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<T.MlGetCategoriesResponse>
+  async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.get_categories']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.job_id != null && params.category_id != null) {
+      method = body != null ? 'POST' : 'GET'
+      path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/categories/${encodeURIComponent(params.category_id.toString())}`
+    } else {
+      method = body != null ? 'POST' : 'GET'
+      path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/categories`
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_categories',
+      pathParts: {
+        job_id: params.job_id,
+        category_id: params.category_id
+      },
+      acceptedParams: [
+        'job_id',
+        'category_id',
+        'page',
+        'from',
+        'partition_field_value',
+        'size'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics | Elasticsearch API documentation}
+    */
+  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsResponse>
+  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsResponse, unknown>>
+  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsResponse>
+  async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.get_data_frame_analytics']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.id != null) {
+      method = 'GET'
+      path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_ml/data_frame/analytics'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_data_frame_analytics',
+      pathParts: {
+        id: params.id
+      },
+      acceptedParams: [
+        'id',
+        'allow_no_match',
+        'from',
+        'size',
+        'exclude_generated'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get data frame analytics job stats.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats | Elasticsearch API documentation}
+    */
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDataFrameAnalyticsStatsResponse, unknown>>
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDataFrameAnalyticsStatsResponse>
+  async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.get_data_frame_analytics_stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.id != null) {
+      method = 'GET'
+      path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_stats`
+    } else {
+      method = 'GET'
+      path = '/_ml/data_frame/analytics/_stats'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_data_frame_analytics_stats',
+      pathParts: {
+        id: params.id
+      },
+      acceptedParams: [
+        'id',
+        'allow_no_match',
+        'from',
+        'size',
+        'verbose'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats | Elasticsearch API documentation}
+    */
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedStatsResponse>
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedStatsResponse, unknown>>
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedStatsResponse>
+  async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.get_datafeed_stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.datafeed_id != null) {
+      method = 'GET'
+      path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_stats`
+    } else {
+      method = 'GET'
+      path = '/_ml/datafeeds/_stats'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_datafeed_stats',
+      pathParts: {
+        datafeed_id: params.datafeed_id
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'allow_no_match'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds | Elasticsearch API documentation}
+    */
+  async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetDatafeedsResponse>
+  async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetDatafeedsResponse, unknown>>
+  async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise<T.MlGetDatafeedsResponse>
+  async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.get_datafeeds']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.datafeed_id != null) {
+      method = 'GET'
+      path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_ml/datafeeds'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_datafeeds',
+      pathParts: {
+        datafeed_id: params.datafeed_id
+      },
+      acceptedParams: [
+        'datafeed_id',
+        'allow_no_match',
+        'exclude_generated'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
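// Editor's note: an illustrative getDatafeeds call (not part of the patch), reusing
// the `client` from the first sketch; the wildcard id is an assumption.
const datafeeds = await client.ml.getDatafeeds({ datafeed_id: 'datafeed-*', allow_no_match: true })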
+
+  /**
+    * Get filters. You can get a single filter or all filters.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters | Elasticsearch API documentation}
+    */
+  async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetFiltersResponse>
+  async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetFiltersResponse, unknown>>
+  async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise<T.MlGetFiltersResponse>
+  async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.get_filters']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.filter_id != null) {
+      method = 'GET'
+      path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_ml/filters'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_filters',
+      pathParts: {
+        filter_id: params.filter_id
+      },
+      acceptedParams: [
+        'filter_id',
+        'from',
+        'size'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers | Elasticsearch API documentation}
+    */
+  async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetInfluencersResponse>
+  async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetInfluencersResponse, unknown>>
+  async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise<T.MlGetInfluencersResponse>
+  async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['ml.get_influencers']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = body != null ? 'POST' : 'GET'
+    const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/influencers`
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_influencers',
+      pathParts: {
+        job_id: params.job_id
+      },
+      acceptedParams: [
+        'job_id',
+        'page',
+        'desc',
+        'end',
+        'exclude_interim',
+        'influencer_score',
+        'from',
+        'size',
+        'sort',
+        'start'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get anomaly detection job stats.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats | Elasticsearch API documentation}
+    */
+  async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobStatsResponse>
+  async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobStatsResponse, unknown>>
+  async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobStatsResponse>
+  async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.get_job_stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.job_id != null) {
+      method = 'GET'
+      path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_stats`
+    } else {
+      method = 'GET'
+      path = '/_ml/anomaly_detectors/_stats'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_job_stats',
+      pathParts: {
+        job_id: params.job_id
+      },
+      acceptedParams: [
+        'job_id',
+        'allow_no_match'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
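// Editor's note: a hypothetical getJobStats call (not part of the patch), reusing the
// `client` from the first sketch; jobs are matched by a wildcard expression as the
// docs above describe.
const stats = await client.ml.getJobStats({ job_id: 'my-*', allow_no_match: true })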
+
+  /**
+    * Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs | Elasticsearch API documentation}
+    */
+  async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetJobsResponse>
+  async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetJobsResponse, unknown>>
+  async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise<T.MlGetJobsResponse>
+  async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.get_jobs']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.job_id != null) {
+      method = 'GET'
+      path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_ml/anomaly_detectors'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_jobs',
+      pathParts: {
+        job_id: params.job_id
+      },
+      acceptedParams: [
+        'job_id',
+        'allow_no_match',
+        'exclude_generated'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats | Elasticsearch API documentation}
+    */
+  async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.MlGetMemoryStatsResponse>
+  async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.MlGetMemoryStatsResponse, unknown>>
+  async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise<T.MlGetMemoryStatsResponse>
+  async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ml.get_memory_stats']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.node_id != null) {
+      method = 'GET'
+      path = `/_ml/memory/${encodeURIComponent(params.node_id.toString())}/_stats`
+    } else {
+      method = 'GET'
+      path = '/_ml/memory/_stats'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'ml.get_memory_stats',
+      pathParts: {
+        node_id: params.node_id
+      },
+      acceptedParams: [
+        'node_id',
+        'master_timeout',
+        'timeout'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
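// Editor's note: a sketch of getMemoryStats (not part of the patch), reusing the
// `client` from the first sketch; node_id is omitted to report usage for all nodes.
const memory = await client.ml.getMemoryStats({ master_timeout: '30s' })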
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats | Elasticsearch API documentation} + */ + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise + async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.get_model_snapshot_upgrade_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_upgrade/_stats` + const meta: TransportRequestMetadata = { + name: 'ml.get_model_snapshot_upgrade_stats', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + }, + acceptedParams: [ + 'job_id', + 'snapshot_id', + 'allow_no_match' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get model snapshots info. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots | Elasticsearch API documentation} + */ + async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise + async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.get_model_snapshots'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.job_id != null && params.snapshot_id != null) { + method = body != null ? 'POST' : 'GET' + path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}` + } else { + method = body != null ? 'POST' : 'GET' + path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots` + } + const meta: TransportRequestMetadata = { + name: 'ml.get_model_snapshots', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + }, + acceptedParams: [ + 'job_id', + 'snapshot_id', + 'desc', + 'end', + 'page', + 'sort', + 'start', + 'from', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets | Elasticsearch API documentation} + */ + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise + async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.get_overall_buckets'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/overall_buckets` + const meta: TransportRequestMetadata = { + name: 'ml.get_overall_buckets', + pathParts: { + job_id: params.job_id + }, + acceptedParams: [ + 'job_id', + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records | Elasticsearch API documentation} + */ + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise + async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.get_records'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/results/records` + const meta: TransportRequestMetadata = { + name: 'ml.get_records', + pathParts: { + job_id: params.job_id + }, + acceptedParams: [ + 'job_id', + 'desc', + 'end', + 'exclude_interim', + 'page', + 'record_score', + 'sort', + 'start', + 'from', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get trained model configuration info.
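+ *
+ * A minimal usage sketch (illustrative addition, not generated output; assumes a configured `client` instance):
+ *
+ * @example
+ * ```ts
+ * // Omit model_id to list all trained models; `size` caps the page
+ * const models = await client.ml.getTrainedModels({ size: 10 })
+ * ```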
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models | Elasticsearch API documentation} + */ + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise + async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.get_trained_models'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.model_id != null) { + method = 'GET' + path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}` + } else { + method = 'GET' + path = '/_ml/trained_models' + } + const meta: TransportRequestMetadata = { + name: 'ml.get_trained_models', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id', + 'allow_no_match', + 'decompress_definition', + 'exclude_generated', + 'from', + 'include', + 'size', + 'tags' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats | Elasticsearch API documentation} + */ + async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise + async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.get_trained_models_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.model_id != null) { + method = 'GET' + path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/_stats` + } else { + method = 'GET' + path = '/_ml/trained_models/_stats' + } + const meta: TransportRequestMetadata = { + name: 'ml.get_trained_models_stats', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id', + 'allow_no_match', + 'from', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Evaluate a trained model. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model | Elasticsearch API documentation} + */ + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise + async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.infer_trained_model'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/_infer` + const meta: TransportRequestMetadata = { + name: 'ml.infer_trained_model', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id', + 'docs', + 'inference_config', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. 
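+ *
+ * A minimal usage sketch (illustrative addition, not generated output; assumes a configured `client` instance):
+ *
+ * @example
+ * ```ts
+ * const info = await client.ml.info()
+ * console.log(info) // defaults, limits and upgrade_mode for this cluster
+ * ```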
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info | Elasticsearch API documentation} + */ + async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise + async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_ml/info' + const meta: TransportRequestMetadata = { + name: 'ml.info', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job | Elasticsearch API documentation} + */ + async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise + async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.open_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_open` + const meta: TransportRequestMetadata = { + name: 'ml.open_job', + pathParts: { + job_id: params.job_id + }, + acceptedParams: [ + 'job_id', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Add scheduled events to the calendar. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events | Elasticsearch API documentation} + */ + async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise + async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.post_calendar_events'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/events` + const meta: TransportRequestMetadata = { + name: 'ml.post_calendar_events', + pathParts: { + calendar_id: params.calendar_id + }, + acceptedParams: [ + 'calendar_id', + 'events' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data | Elasticsearch API documentation} + */ + async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise + async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.post_data'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_data` + const meta: TransportRequestMetadata = { + name: 'ml.post_data', + pathParts: { + job_id: params.job_id + }, + acceptedParams: [ + 'job_id', + 'data', + 'reset_end', + 'reset_start' + ] + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) + } + + /** + * Preview features used by data frame analytics. Preview the extracted features used by a data frame analytics config. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics | Elasticsearch API documentation} + */ + async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.preview_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = body != null ? 'POST' : 'GET' + path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_preview` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_ml/data_frame/analytics/_preview' + } + const meta: TransportRequestMetadata = { + name: 'ml.preview_data_frame_analytics', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'config' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. 
The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed | Elasticsearch API documentation} + */ + async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> + async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.preview_datafeed'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.datafeed_id != null) { + method = body != null ? 'POST' : 'GET' + path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_preview` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_ml/datafeeds/_preview' + } + const meta: TransportRequestMetadata = { + name: 'ml.preview_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + }, + acceptedParams: [ + 'datafeed_id', + 'datafeed_config', + 'job_config', + 'start', + 'end' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a calendar. 
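+ *
+ * A minimal usage sketch (illustrative addition, not generated output; the calendar ID and description are hypothetical):
+ *
+ * @example
+ * ```ts
+ * await client.ml.putCalendar({
+ *   calendar_id: 'planned-outages',
+ *   description: 'Known maintenance windows to exclude from anomaly detection'
+ * })
+ * ```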
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar | Elasticsearch API documentation} + */ + async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise + async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.put_calendar'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.put_calendar', + pathParts: { + calendar_id: params.calendar_id + }, + acceptedParams: [ + 'calendar_id', + 'job_ids', + 'description' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Add anomaly detection job to calendar. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job | Elasticsearch API documentation} + */ + async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise + async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.put_calendar_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_ml/calendars/${encodeURIComponent(params.calendar_id.toString())}/jobs/${encodeURIComponent(params.job_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.put_calendar_job', + pathParts: { + calendar_id: params.calendar_id, + job_id: params.job_id + }, + acceptedParams: [ + 'calendar_id', + 'job_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`. If the destination index does not exist, it is created automatically when you start the job. If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics | Elasticsearch API documentation} + */ + async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.put_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.put_data_frame_analytics', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'allow_lazy_start', + 'analysis', + 'analyzed_fields', + 'description', + 'dest', + 'max_num_threads', + '_meta', + 'model_memory_limit', + 'source', + 'headers', + 'version' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed | Elasticsearch API documentation} + */ + async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise + async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.put_datafeed'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.put_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + }, + acceptedParams: [ + 'datafeed_id', + 'aggregations', + 'aggs', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size', + 'headers', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter | Elasticsearch API documentation} + */ + async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise + async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.put_filter'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.put_filter', + pathParts: { + filter_id: params.filter_id + }, + acceptedParams: [ + 'filter_id', + 'description', + 'items' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. 
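+ *
+ * A minimal usage sketch (illustrative addition, not generated output; the job ID and field names are hypothetical):
+ *
+ * @example
+ * ```ts
+ * await client.ml.putJob({
+ *   job_id: 'response-times',
+ *   analysis_config: {
+ *     bucket_span: '15m',
+ *     detectors: [{ function: 'mean', field_name: 'responsetime' }]
+ *   },
+ *   data_description: { time_field: 'timestamp' }
+ * })
+ * ```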
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job | Elasticsearch API documentation} + */ + async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise + async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.put_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.put_job', + pathParts: { + job_id: params.job_id + }, + acceptedParams: [ + 'job_id', + 'allow_lazy_open', + 'analysis_config', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'daily_model_snapshot_retention_after_days', + 'data_description', + 'datafeed_config', + 'description', + 'groups', + 'model_plot_config', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_index_name', + 'results_retention_days', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a trained model. Enables you to supply a trained model that is not created by data frame analytics. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model | Elasticsearch API documentation} + */ + async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise + async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.put_trained_model'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.put_trained_model', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id', + 'compressed_definition', + 'definition', + 'description', + 'inference_config', + 'input', + 'metadata', + 'model_type', + 'model_size_bytes', + 'platform_architecture', + 'tags', + 'prefix_strings', + 'defer_definition_decompression', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias | Elasticsearch API documentation} + */ + async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise + async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.put_trained_model_alias'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/model_aliases/${encodeURIComponent(params.model_alias.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.put_trained_model_alias', + pathParts: { + model_alias: params.model_alias, + model_id: params.model_id + }, + acceptedParams: [ + 'model_alias', + 'model_id', + 'reassign' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create part of a trained model definition. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part | Elasticsearch API documentation} + */ + async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise + async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.put_trained_model_definition_part'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/definition/${encodeURIComponent(params.part.toString())}` + const meta: TransportRequestMetadata = { + name: 'ml.put_trained_model_definition_part', + pathParts: { + model_id: params.model_id, + part: params.part + }, + acceptedParams: [ + 'model_id', + 'part', + 'definition', + 'total_definition_length', + 'total_parts' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. 
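+ *
+ * A minimal usage sketch (illustrative addition, not generated output; the model ID and tokens are hypothetical):
+ *
+ * @example
+ * ```ts
+ * await client.ml.putTrainedModelVocabulary({
+ *   model_id: 'my-nlp-model',
+ *   vocabulary: ['[PAD]', '[UNK]', 'elastic', 'search']
+ * })
+ * ```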
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary | Elasticsearch API documentation} + */ + async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise + async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.put_trained_model_vocabulary'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/vocabulary` + const meta: TransportRequestMetadata = { + name: 'ml.put_trained_model_vocabulary', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id', + 'vocabulary', + 'merges', + 'scores' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job | Elasticsearch API documentation} + */ + async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise + async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.reset_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_reset` + const meta: TransportRequestMetadata = { + name: 'ml.reset_job', + pathParts: { + job_id: params.job_id + }, + acceptedParams: [ + 'job_id', + 'wait_for_completion', + 'delete_user_annotations' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot | Elasticsearch API documentation} + */ + async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> + async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise + async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.revert_model_snapshot'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_revert` + const meta: TransportRequestMetadata = { + name: 'ml.revert_model_snapshot', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + }, + acceptedParams: [ + 'job_id', + 'snapshot_id', + 'delete_intervening_results', + 'delete_intervening_results' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode | Elasticsearch API documentation} + */ + async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise + async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.set_upgrade_mode'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_ml/set_upgrade_mode' + const meta: TransportRequestMetadata = { + name: 'ml.set_upgrade_mode', + acceptedParams: [ + 'enabled', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. 
The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics | Elasticsearch API documentation} + */ + async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.start_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_start` + const meta: TransportRequestMetadata = { + name: 'ml.start_data_frame_analytics', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. 
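+ *
+ * An illustrative usage sketch, not part of the generated source; it assumes an already-configured `client` instance of this library and a hypothetical datafeed ID:
+ * @example
+ * // Start the datafeed, replaying data from two hours ago onwards
+ * const resp = await client.ml.startDatafeed({
+ *   datafeed_id: 'datafeed-my-job', // hypothetical ID
+ *   start: 'now-2h'
+ * })
+ * console.log(resp.started) // true if the datafeed started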
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed | Elasticsearch API documentation} + */ + async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise + async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.start_datafeed'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_start` + const meta: TransportRequestMetadata = { + name: 'ml.start_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + }, + acceptedParams: [ + 'datafeed_id', + 'end', + 'start', + 'timeout', + 'end', + 'start', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start a trained model deployment. It allocates the model to every machine learning node. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment | Elasticsearch API documentation} + */ + async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise + async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.start_trained_model_deployment'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_start` + const meta: TransportRequestMetadata = { + name: 'ml.start_trained_model_deployment', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id', + 'adaptive_allocations', + 'cache_size', + 'deployment_id', + 'number_of_allocations', + 'priority', + 'queue_capacity', + 'threads_per_allocation', + 'timeout', + 'wait_for' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics | Elasticsearch API documentation} + */ + async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.stop_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_stop` + const meta: TransportRequestMetadata = { + name: 'ml.stop_data_frame_analytics', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'allow_no_match', + 'force', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. 
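+ *
+ * An illustrative usage sketch, not part of the generated source (assumes a configured `client`; the datafeed ID is hypothetical):
+ * @example
+ * // Force-stop the datafeed, waiting up to 30 seconds
+ * const resp = await client.ml.stopDatafeed({
+ *   datafeed_id: 'datafeed-my-job',
+ *   force: true,
+ *   timeout: '30s'
+ * })
+ * console.log(resp.stopped)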
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed | Elasticsearch API documentation} + */ + async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise + async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.stop_datafeed'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_stop` + const meta: TransportRequestMetadata = { + name: 'ml.stop_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + }, + acceptedParams: [ + 'datafeed_id', + 'allow_no_match', + 'force', + 'timeout', + 'allow_no_match', + 'force', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Stop a trained model deployment. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment | Elasticsearch API documentation} + */ + async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise + async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.stop_trained_model_deployment'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_stop` + const meta: TransportRequestMetadata = { + name: 'ml.stop_trained_model_deployment', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id', + 'allow_no_match', + 'force' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update a data frame analytics job. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics | Elasticsearch API documentation} + */ + async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise + async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.update_data_frame_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/data_frame/analytics/${encodeURIComponent(params.id.toString())}/_update` + const meta: TransportRequestMetadata = { + name: 'ml.update_data_frame_analytics', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'description', + 'model_memory_limit', + 'max_num_threads', + 'allow_lazy_start' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. 
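+ *
+ * An illustrative usage sketch, not part of the generated source (assumes a configured `client`; the datafeed ID, field name, and values are hypothetical):
+ * @example
+ * // Narrow the query and lower the scroll size, then stop and start
+ * // the datafeed so the changes take effect
+ * await client.ml.updateDatafeed({
+ *   datafeed_id: 'datafeed-my-job',
+ *   query: { term: { 'host.name': 'web-01' } },
+ *   scroll_size: 500
+ * })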
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed | Elasticsearch API documentation} + */ + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise + async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.update_datafeed'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/datafeeds/${encodeURIComponent(params.datafeed_id.toString())}/_update` + const meta: TransportRequestMetadata = { + name: 'ml.update_datafeed', + pathParts: { + datafeed_id: params.datafeed_id + }, + acceptedParams: [ + 'datafeed_id', + 'aggregations', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update a filter. Updates the description of a filter, adds items, or removes items from the list. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter | Elasticsearch API documentation} + */ + async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise + async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.update_filter'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/filters/${encodeURIComponent(params.filter_id.toString())}/_update` + const meta: TransportRequestMetadata = { + name: 'ml.update_filter', + pathParts: { + filter_id: params.filter_id + }, + acceptedParams: [ + 'filter_id', + 'add_items', + 'description', + 'remove_items' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update an anomaly detection job. Updates certain properties of an anomaly detection job. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job | Elasticsearch API documentation} + */ + async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise + async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.update_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/_update` + const meta: TransportRequestMetadata = { + name: 'ml.update_job', + pathParts: { + job_id: params.job_id + }, + acceptedParams: [ + 'job_id', + 'allow_lazy_open', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'categorization_filters', + 'description', + 'model_plot_config', + 'model_prune_window', + 'daily_model_snapshot_retention_after_days', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_retention_days', + 'groups', + 'detectors', + 'per_partition_categorization' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update a snapshot. Updates certain properties of a snapshot. 
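+ *
+ * An illustrative usage sketch, not part of the generated source (assumes a configured `client`; the job and snapshot IDs are hypothetical):
+ * @example
+ * // Keep this snapshot beyond the retention period and annotate it
+ * await client.ml.updateModelSnapshot({
+ *   job_id: 'my-job',
+ *   snapshot_id: '1575402236',
+ *   description: 'Snapshot taken before Black Friday',
+ *   retain: true
+ * })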
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot | Elasticsearch API documentation} + */ + async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise + async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.update_model_snapshot'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_update` + const meta: TransportRequestMetadata = { + name: 'ml.update_model_snapshot', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + }, + acceptedParams: [ + 'job_id', + 'snapshot_id', + 'description', + 'retain' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update a trained model deployment. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment | Elasticsearch API documentation} + */ + async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise + async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.update_trained_model_deployment'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_ml/trained_models/${encodeURIComponent(params.model_id.toString())}/deployment/_update` + const meta: TransportRequestMetadata = { + name: 'ml.update_trained_model_deployment', + pathParts: { + model_id: params.model_id + }, + acceptedParams: [ + 'model_id', + 'number_of_allocations', + 'adaptive_allocations', + 'number_of_allocations' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Upgrade a snapshot. Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot | Elasticsearch API documentation} + */ + async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> + async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise + async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['ml.upgrade_job_snapshot'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_ml/anomaly_detectors/${encodeURIComponent(params.job_id.toString())}/model_snapshots/${encodeURIComponent(params.snapshot_id.toString())}/_upgrade` + const meta: TransportRequestMetadata = { + name: 'ml.upgrade_job_snapshot', + pathParts: { + job_id: params.job_id, + snapshot_id: params.snapshot_id + }, + acceptedParams: [ + 'job_id', + 'snapshot_id', + 'wait_for_completion', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Validate an anomaly detection job. 
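+ *
+ * An illustrative usage sketch, not part of the generated source (assumes a configured `client`; the field names are hypothetical):
+ * @example
+ * // Check a job configuration for errors without creating the job
+ * await client.ml.validate({
+ *   analysis_config: {
+ *     bucket_span: '15m',
+ *     detectors: [{ function: 'mean', field_name: 'responsetime' }]
+ *   },
+ *   data_description: { time_field: 'timestamp' }
+ * })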
+ * @see {@link https://www.elastic.co/guide/en/machine-learning/master/ml-jobs.html | Elasticsearch API documentation} + */ + async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise + async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.validate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_ml/anomaly_detectors/_validate' + const meta: TransportRequestMetadata = { + name: 'ml.validate', + acceptedParams: [ + 'job_id', + 'analysis_config', + 'analysis_limits', + 'data_description', + 'description', + 'model_plot', + 'model_snapshot_id', + 'model_snapshot_retention_days', + 'results_index_name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Validate an anomaly detection job. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch | Elasticsearch API documentation} + */ + async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise + async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['ml.validate_detector'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_ml/anomaly_detectors/_validate/detector' + const meta: TransportRequestMetadata = { + name: 'ml.validate_detector', + acceptedParams: [ + 'detector' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts new file mode 100644 index 000000000..f8abf74f6 --- /dev/null +++ b/src/api/api/monitoring.ts @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Monitoring { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'monitoring.bulk': { + path: [ + 'type' + ], + body: [ + 'operations' + ], + query: [ + 'system_id', + 'system_api_version', + 'interval' + ] + } + } + } + + /** + * Send monitoring data. This API is used by the monitoring features to send monitoring data. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch | Elasticsearch API documentation} + */ + async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise + async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['monitoring.bulk'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_monitoring/bulk' + const meta: TransportRequestMetadata = { + name: 'monitoring.bulk', + pathParts: { + type: params.type + }, + acceptedParams: [ + 'type', + 'operations', + 'system_id', + 'system_api_version', + 'interval' + ] + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) + } +} diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts new file mode 100644 index 000000000..d508b62b5 --- /dev/null +++ b/src/api/api/msearch.ts @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + msearch: { + path: [ + 'index' + ], + body: [ + 'searches' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'index', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'project_routing', + 'rest_total_hits_as_int', + 'routing', + 'search_type', + 'typed_keys' + ] + } +} + +/** + * Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch | Elasticsearch API documentation} + */ +export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise> +export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.msearch + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? 
undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_msearch` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_msearch' + } + const meta: TransportRequestMetadata = { + name: 'msearch', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'searches', + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'index', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'project_routing', + 'rest_total_hits_as_int', + 'routing', + 'search_type', + 'typed_keys' + ] + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) +} diff --git a/src/api/api/msearch_template.ts b/src/api/api/msearch_template.ts new file mode 100644 index 000000000..6f6922aba --- /dev/null +++ b/src/api/api/msearch_template.ts @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + msearch_template: { + path: [ + 'index' + ], + body: [ + 'search_templates' + ], + query: [ + 'ccs_minimize_roundtrips', + 'max_concurrent_searches', + 'project_routing', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } +} + +/** + * Run multiple templated searches. Run multiple templated searches with a single request. If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. 
For example: ``` $ cat requests { "index": "my-index" } { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} { "index": "my-other-index" } { "id": "my-other-search-template", "params": { "query_type": "match_all" }} $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo ``` + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template | Elasticsearch API documentation} + */ +export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise> +export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.msearch_template + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_msearch/template` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_msearch/template' + } + const meta: TransportRequestMetadata = { + name: 'msearch_template', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'search_templates', + 'ccs_minimize_roundtrips', + 'max_concurrent_searches', + 'project_routing', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) +} diff --git a/src/api/api/mtermvectors.ts b/src/api/api/mtermvectors.ts new file mode 100644 index 000000000..ce7e2f843 --- /dev/null +++ b/src/api/api/mtermvectors.ts @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + mtermvectors: { + path: [ + 'index' + ], + body: [ + 'docs', + 'ids' + ], + query: [ + 'ids', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } +} + +/** + * Get multiple term vectors. Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. **Artificial documents** You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified `_index`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors | Elasticsearch API documentation} + */ +export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise +export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.mtermvectors + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_mtermvectors` + } else { + method = body != null ? 
'POST' : 'GET' + path = '/_mtermvectors' + } + const meta: TransportRequestMetadata = { + name: 'mtermvectors', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'docs', + 'ids', + 'ids', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts new file mode 100644 index 000000000..e285131b7 --- /dev/null +++ b/src/api/api/nodes.ts @@ -0,0 +1,565 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Nodes { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'nodes.clear_repositories_metering_archive': { + path: [ + 'node_id', + 'max_archive_version' + ], + body: [], + query: [] + }, + 'nodes.get_repositories_metering_info': { + path: [ + 'node_id' + ], + body: [], + query: [] + }, + 'nodes.hot_threads': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'ignore_idle_threads', + 'interval', + 'snapshots', + 'threads', + 'timeout', + 'type', + 'sort' + ] + }, + 'nodes.info': { + path: [ + 'node_id', + 'metric' + ], + body: [], + query: [ + 'flat_settings', + 'timeout' + ] + }, + 'nodes.reload_secure_settings': { + path: [ + 'node_id' + ], + body: [ + 'secure_settings_password' + ], + query: [ + 'timeout' + ] + }, + 'nodes.stats': { + path: [ + 'node_id', + 'metric', + 'index_metric' + ], + body: [], + query: [ + 'completion_fields', + 'fielddata_fields', + 'fields', + 'groups', + 'include_segment_file_sizes', + 'level', + 'timeout', + 'types', + 'include_unloaded_segments' + ] + }, + 'nodes.usage': { + path: [ + 'node_id', + 'metric' + ], + body: [], + query: [ + 'timeout' + ] + } + } + } + + /** + * Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. 
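+ *
+ * An illustrative usage sketch, not part of the generated source (assumes a configured `client`; the node ID and archive version are hypothetical):
+ * @example
+ * // Drop archived metering records up to and including version 42 on one node
+ * await client.nodes.clearRepositoriesMeteringArchive({
+ *   node_id: 'node-1',
+ *   max_archive_version: 42
+ * })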
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive | Elasticsearch API documentation} + */ + async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise + async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['nodes.clear_repositories_metering_archive'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/_repositories_metering/${encodeURIComponent(params.max_archive_version.toString())}` + const meta: TransportRequestMetadata = { + name: 'nodes.clear_repositories_metering_archive', + pathParts: { + node_id: params.node_id, + max_archive_version: params.max_archive_version + }, + acceptedParams: [ + 'node_id', + 'max_archive_version' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info | Elasticsearch API documentation} + */ + async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise + async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['nodes.get_repositories_metering_info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/_repositories_metering` + const meta: TransportRequestMetadata = { + name: 'nodes.get_repositories_metering_info', + pathParts: { + node_id: params.node_id + }, + acceptedParams: [ + 'node_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads | Elasticsearch API documentation} + */ + async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise + async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['nodes.hot_threads'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/hot_threads` + } else { + method = 'GET' + path = '/_nodes/hot_threads' + } + const meta: TransportRequestMetadata = { + name: 'nodes.hot_threads', + pathParts: { + node_id: params.node_id + }, + acceptedParams: [ + 'node_id', + 'ignore_idle_threads', + 'interval', + 'snapshots', + 'threads', + 'timeout', + 'type', + 'sort' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get node information. By default, the API returns all attributes and core settings for cluster nodes. 
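A short sketch of how these two read-only endpoints are typically called, reusing the `client` instance from the earlier sketch (the node filter and metrics are illustrative):

```ts
// Plain-text breakdown of the busiest threads on every node.
const hotThreads = await client.nodes.hotThreads({ threads: 3, interval: '500ms' })

// OS and JVM details for a single, hypothetical node.
const info = await client.nodes.info({ node_id: 'node-1', metric: ['os', 'jvm'] })
```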
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info | Elasticsearch API documentation} + */ + async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise + async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['nodes.info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null && params.metric != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/${encodeURIComponent(params.metric.toString())}` + } else if (params.node_id != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}` + } else if (params.metric != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.metric.toString())}` + } else { + method = 'GET' + path = '/_nodes' + } + const meta: TransportRequestMetadata = { + name: 'nodes.info', + pathParts: { + node_id: params.node_id, + metric: params.metric + }, + acceptedParams: [ + 'node_id', + 'metric', + 'flat_settings', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Reload the keystore on nodes in the cluster. Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. 
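A hedged sketch of a cluster-wide reload under the same `client` assumption; the password is a placeholder and is only needed when the keystore is password protected:

```ts
await client.nodes.reloadSecureSettings({
  secure_settings_password: 'keystore-password' // placeholder
})
```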
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings | Elasticsearch API documentation} + */ + async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise + async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['nodes.reload_secure_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.node_id != null) { + method = 'POST' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/reload_secure_settings` + } else { + method = 'POST' + path = '/_nodes/reload_secure_settings' + } + const meta: TransportRequestMetadata = { + name: 'nodes.reload_secure_settings', + pathParts: { + node_id: params.node_id + }, + acceptedParams: [ + 'node_id', + 'secure_settings_password', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats | Elasticsearch API documentation} + */ + async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['nodes.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null && params.metric != null && params.index_metric != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/stats/${encodeURIComponent(params.metric.toString())}/${encodeURIComponent(params.index_metric.toString())}` + } else if (params.node_id != null && params.metric != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/stats/${encodeURIComponent(params.metric.toString())}` + } else if (params.metric != null && params.index_metric != null) { + method = 'GET' + path = `/_nodes/stats/${encodeURIComponent(params.metric.toString())}/${encodeURIComponent(params.index_metric.toString())}` + } else if (params.node_id != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/stats` + } else if (params.metric != null) { + method = 'GET' + path = `/_nodes/stats/${encodeURIComponent(params.metric.toString())}` + } else { + method = 'GET' + path = '/_nodes/stats' + } + const meta: TransportRequestMetadata = { + name: 'nodes.stats', + pathParts: { + node_id: params.node_id, + metric: params.metric, + index_metric: params.index_metric + }, + acceptedParams: [ + 'node_id', + 'metric', + 'index_metric', + 'completion_fields', + 'fielddata_fields', + 'fields', + 'groups', + 'include_segment_file_sizes', + 'level', + 'timeout', + 'types', + 'include_unloaded_segments' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get feature usage information. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage | Elasticsearch API documentation} + */ + async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> + async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise + async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['nodes.usage'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null && params.metric != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/usage/${encodeURIComponent(params.metric.toString())}` + } else if (params.node_id != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/usage` + } else if (params.metric != null) { + method = 'GET' + path = `/_nodes/usage/${encodeURIComponent(params.metric.toString())}` + } else { + method = 'GET' + path = '/_nodes/usage' + } + const meta: TransportRequestMetadata = { + name: 'nodes.usage', + pathParts: { + node_id: params.node_id, + metric: params.metric + }, + acceptedParams: [ + 'node_id', + 'metric', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts new file mode 100644 index 000000000..1a863b1e6 --- /dev/null +++ b/src/api/api/open_point_in_time.ts @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + open_point_in_time: { + path: [ + 'index' + ], + body: [ + 'index_filter' + ], + query: [ + 'keep_alive', + 'ignore_unavailable', + 'preference', + 'project_routing', + 'routing', + 'expand_wildcards', + 'allow_partial_search_results', + 'max_concurrent_shard_requests' + ] + } +} + +/** + * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. 
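The open/search/close lifecycle this comment describes, as a sketch under the same `client` assumption (the index name is hypothetical):

```ts
// Open a PIT, page against the frozen view with search_after, then close it.
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

const page = await client.search({
  pit: { id: pit.id, keep_alive: '1m' }, // no index/routing/preference here
  sort: ['_shard_doc'],
  size: 100
})

await client.closePointInTime({ id: pit.id })
```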
IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. **Keeping point in time alive** The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request. Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use. TIP: Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles. Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time | Elasticsearch API documentation} + */ +export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise +export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.open_point_in_time + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_pit` + const meta: TransportRequestMetadata = { + name: 'open_point_in_time', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'index_filter', + 'keep_alive', + 'ignore_unavailable', + 'preference', + 'project_routing', + 'routing', + 'expand_wildcards', + 'allow_partial_search_results', + 'max_concurrent_shard_requests' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts new file mode 100644 index 000000000..5acab70fb --- /dev/null +++ b/src/api/api/ping.ts @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + ping: { + path: [], + body: [], + query: [] + } +} + +/** + * Ping the cluster. Get information about whether the cluster is running. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster | Elasticsearch API documentation} + */ +export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise +export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.ping + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'HEAD' + const path = '/' + const meta: TransportRequestMetadata = { + name: 'ping', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts new file mode 100644 index 000000000..aa118c5d4 --- /dev/null +++ b/src/api/api/profiling.ts @@ -0,0 +1,235 @@ +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +export default class Profiling { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'profiling.flamegraph': { + path: [], + body: [], + query: [] + }, + 'profiling.stacktraces': { + path: [], + body: [], + query: [] + }, + 'profiling.status': { + path: [], + body: [], + query: [] + }, + 'profiling.topn_functions': { + path: [], + body: [], + query: [] + } + } + } + + /** + * Extracts a UI-optimized structure to render flamegraphs from Universal Profiling + * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + */ + async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['profiling.flamegraph'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_profiling/flamegraph' + const meta: TransportRequestMetadata = { + name: 'profiling.flamegraph', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Extracts raw stacktrace information from Universal Profiling + * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + */ + async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['profiling.stacktraces'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_profiling/stacktraces' + const meta: TransportRequestMetadata = { + name: 'profiling.stacktraces', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Returns basic information about the status of Universal Profiling + * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + */ + async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['profiling.status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_profiling/status' + const meta: TransportRequestMetadata = { + name: 'profiling.status', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Extracts a list of topN functions from Universal Profiling + * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + */ + async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['profiling.topn_functions'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_profiling/topn/functions' + const meta: TransportRequestMetadata = { + name: 'profiling.topn_functions', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/project.ts b/src/api/api/project.ts new file mode 100644 index 000000000..7818be504 --- /dev/null +++ b/src/api/api/project.ts @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +export default class Project { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'project.tags': { + path: [], + body: [], + query: [] + } + } + } + + /** + * Get tags. Get the tags that are defined for the project. 
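A minimal sketch of this call, assuming a client configured against a serverless project (this endpoint is serverless-only per the documentation link that follows):

```ts
// Fetch the tags defined for the current project.
const resp = await client.project.tags()
console.log(resp)
```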
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch-serverless/operation/operation-project-tags | Elasticsearch API documentation} + */ + async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptions): Promise + async tags (this: That, params?: T.ProjectTagsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['project.tags'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_project/tags' + const meta: TransportRequestMetadata = { + name: 'project.tags', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts new file mode 100644 index 000000000..0b62ea3b4 --- /dev/null +++ b/src/api/api/put_script.ts @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + put_script: { + path: [ + 'id', + 'context' + ], + body: [ + 'script' + ], + query: [ + 'context', + 'master_timeout', + 'timeout' + ] + } +} + +/** + * Create or update a script or search template. Creates or updates a stored script or search template. 
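For example, under the same `client` assumption (the script ID and Painless source are invented for illustration):

```ts
await client.putScript({
  id: 'my-calc-script', // hypothetical stored-script ID
  script: {
    lang: 'painless',
    source: "doc['rating'].value * params.factor"
  }
})
```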
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script | Elasticsearch API documentation} + */ +export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise +export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.put_script + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.id != null && params.context != null) { + method = 'PUT' + path = `/_scripts/${encodeURIComponent(params.id.toString())}/${encodeURIComponent(params.context.toString())}` + } else { + method = 'PUT' + path = `/_scripts/${encodeURIComponent(params.id.toString())}` + } + const meta: TransportRequestMetadata = { + name: 'put_script', + pathParts: { + id: params.id, + context: params.context + }, + acceptedParams: [ + 'id', + 'context', + 'script', + 'context', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts new file mode 100644 index 000000000..cbde4cf9e --- /dev/null +++ b/src/api/api/query_rules.ts @@ -0,0 +1,541 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class QueryRules { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'query_rules.delete_rule': { + path: [ + 'ruleset_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'query_rules.delete_ruleset': { + path: [ + 'ruleset_id' + ], + body: [], + query: [] + }, + 'query_rules.get_rule': { + path: [ + 'ruleset_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'query_rules.get_ruleset': { + path: [ + 'ruleset_id' + ], + body: [], + query: [] + }, + 'query_rules.list_rulesets': { + path: [], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'query_rules.put_rule': { + path: [ + 'ruleset_id', + 'rule_id' + ], + body: [ + 'type', + 'criteria', + 'actions', + 'priority' + ], + query: [] + }, + 'query_rules.put_ruleset': { + path: [ + 'ruleset_id' + ], + body: [ + 'rules' + ], + query: [] + }, + 'query_rules.test': { + path: [ + 'ruleset_id' + ], + body: [ + 'match_criteria' + ], + query: [] + } + } + } + + /** + * Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule | Elasticsearch API documentation} + */ + async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise + async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['query_rules.delete_rule'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}/_rule/${encodeURIComponent(params.rule_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.delete_rule', + pathParts: { + ruleset_id: params.ruleset_id, + rule_id: params.rule_id + }, + acceptedParams: [ + 'ruleset_id', + 'rule_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. 
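A sketch of both destructive calls (same assumed `client`; the ruleset and rule IDs are placeholders):

```ts
// Remove a single rule, then the entire ruleset it belonged to.
await client.queryRules.deleteRule({ ruleset_id: 'my-ruleset', rule_id: 'rule-1' })
await client.queryRules.deleteRuleset({ ruleset_id: 'my-ruleset' })
```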
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset | Elasticsearch API documentation} + */ + async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise + async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['query_rules.delete_ruleset'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.delete_ruleset', + pathParts: { + ruleset_id: params.ruleset_id + }, + acceptedParams: [ + 'ruleset_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a query rule. Get details about a query rule within a query ruleset. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule | Elasticsearch API documentation} + */ + async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise + async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['query_rules.get_rule'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}/_rule/${encodeURIComponent(params.rule_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.get_rule', + pathParts: { + ruleset_id: params.ruleset_id, + rule_id: params.rule_id + }, + acceptedParams: [ + 'ruleset_id', + 'rule_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a query ruleset. Get details about a query ruleset. 
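The matching read calls, sketched under the same assumptions:

```ts
const rule = await client.queryRules.getRule({ ruleset_id: 'my-ruleset', rule_id: 'rule-1' })
const ruleset = await client.queryRules.getRuleset({ ruleset_id: 'my-ruleset' })
```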
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset | Elasticsearch API documentation} + */ + async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise + async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['query_rules.get_ruleset'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.get_ruleset', + pathParts: { + ruleset_id: params.ruleset_id + }, + acceptedParams: [ + 'ruleset_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get all query rulesets. Get summarized information about the query rulesets. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets | Elasticsearch API documentation} + */ + async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise + async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['query_rules.list_rulesets'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_query_rules' + const meta: TransportRequestMetadata = { + name: 'query_rules.list_rulesets', + acceptedParams: [ + 'from', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a query rule. Create or update a query rule within a query ruleset. IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors.
Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule | Elasticsearch API documentation} + */ + async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise + async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['query_rules.put_rule'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}/_rule/${encodeURIComponent(params.rule_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.put_rule', + pathParts: { + ruleset_id: params.ruleset_id, + rule_id: params.rule_id + }, + acceptedParams: [ + 'ruleset_id', + 'rule_id', + 'type', + 'criteria', + 'actions', + 'priority' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in a single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
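A sketch of a pinned-query rule and the equivalent whole-ruleset upsert, under the same `client` assumption (ruleset ID, rule ID, criteria, and document IDs are all illustrative):

```ts
// Pin one document whenever the user query is exactly 'pugs'.
await client.queryRules.putRule({
  ruleset_id: 'my-ruleset',
  rule_id: 'rule-1',
  type: 'pinned',
  criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
  actions: { ids: ['doc-1'] } // ids OR docs, never both in a single rule
})

// Or replace the whole ruleset in one request.
await client.queryRules.putRuleset({
  ruleset_id: 'my-ruleset',
  rules: [{
    rule_id: 'rule-1',
    type: 'pinned',
    criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
    actions: { ids: ['doc-1'] }
  }]
})
```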
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset | Elasticsearch API documentation} + */ + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise + async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['query_rules.put_ruleset'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'query_rules.put_ruleset', + pathParts: { + ruleset_id: params.ruleset_id + }, + acceptedParams: [ + 'ruleset_id', + 'rules' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test | Elasticsearch API documentation} + */ + async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise> + async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise + async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['query_rules.test'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_query_rules/${encodeURIComponent(params.ruleset_id.toString())}/_test` + const meta: TransportRequestMetadata = { + name: 'query_rules.test', + pathParts: { + ruleset_id: params.ruleset_id + }, + acceptedParams: [ + 'ruleset_id', + 'match_criteria' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts new file mode 100644 index 000000000..1ddb4082a --- /dev/null +++ b/src/api/api/rank_eval.ts @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + rank_eval: { + path: [ + 'index' + ], + body: [ + 'requests', + 'metric' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'search_type' + ] + } +} + +/** + * Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval | Elasticsearch API documentation} + */ +export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise +export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.rank_eval + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 
'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_rank_eval` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_rank_eval' + } + const meta: TransportRequestMetadata = { + name: 'rank_eval', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'requests', + 'metric', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'search_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts new file mode 100644 index 000000000..2d576a7fc --- /dev/null +++ b/src/api/api/reindex.ts @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + reindex: { + path: [], + body: [ + 'conflicts', + 'dest', + 'max_docs', + 'script', + 'source' + ], + query: [ + 'refresh', + 'requests_per_second', + 'scroll', + 'slices', + 'max_docs', + 'timeout', + 'wait_for_active_shards', + 'wait_for_completion', + 'require_alias' + ] + } +} + +/** + * Reindex documents. Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: * The `read` index privilege for the source data stream, index, or alias. * The `write` index privilege for the destination data stream, index, or index alias. * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. The `dest` element can be configured like the index API to control optimistic concurrency control. 
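For example, a `dest` configured for optimistic concurrency control might look like the following with this client (an editor's illustrative sketch, not generated documentation; it assumes an instantiated `client` from `@elastic/elasticsearch` and hypothetical index names): ``` const resp = await client.reindex({ source: { index: 'my-source-index' }, dest: { index: 'my-dest-index', version_type: 'external' } }); ```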
Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. It's recommended to reindex on indices with a green status. Reindexing can fail when a node shuts down or crashes. * When requested with `wait_for_completion=true` (default), the request fails if the node shuts down. * When requested with `wait_for_completion=false`, a task id is returned, for use with the task management APIs. The task may disappear or fail if the node shuts down. When retrying a failed reindex operation, it might be necessary to set `conflicts=proceed` or to first delete the partial destination index. Additionally, dry runs, checking disk space, and fetching index recovery information can help address the root cause. Refer to the linked documentation for examples of how to reindex documents. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex | Elasticsearch API documentation} + */ +export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise +export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.reindex + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_reindex' + const meta: TransportRequestMetadata = { + name: 'reindex', + acceptedParams: [ + 'conflicts', + 'dest', + 'max_docs', + 'script', + 'source', + 'refresh', + 'requests_per_second', + 'scroll', + 'slices', + 'max_docs', + 'timeout', + 'wait_for_active_shards', + 'wait_for_completion', + 'require_alias' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts new file mode 100644 index 000000000..8ef9381e2 --- /dev/null +++ b/src/api/api/reindex_rethrottle.ts @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + reindex_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} + +/** + * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex | Elasticsearch API documentation} + */ +export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise +export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.reindex_rethrottle + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_reindex/${encodeURIComponent(params.task_id.toString())}/_rethrottle` + const meta: TransportRequestMetadata = { + name: 'reindex_rethrottle', + pathParts: { + task_id: params.task_id + }, + acceptedParams: [ + 'task_id', + 'requests_per_second' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts new file mode 100644 index 000000000..4b16d3248 --- /dev/null +++ b/src/api/api/render_search_template.ts @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + render_search_template: { + path: [], + body: [ + 'id', + 'file', + 'params', + 'source' + ], + query: [] + } +} + +/** + * Render a search template. Render a search template as a search request body. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template | Elasticsearch API documentation} + */ +export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise +export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.render_search_template + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = body != null ? 'POST' : 'GET' + path = `/_render/template/${encodeURIComponent(params.id.toString())}` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_render/template' + } + const meta: TransportRequestMetadata = { + name: 'render_search_template', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'id', + 'file', + 'params', + 'source' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts new file mode 100644 index 000000000..4f216dc29 --- /dev/null +++ b/src/api/api/rollup.ts @@ -0,0 +1,557 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Rollup { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'rollup.delete_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_jobs': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_caps': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_index_caps': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'rollup.put_job': { + path: [ + 'id' + ], + body: [ + 'cron', + 'groups', + 'index_pattern', + 'metrics', + 'page_size', + 'rollup_index', + 'timeout', + 'headers' + ], + query: [] + }, + 'rollup.rollup_search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'query', + 'size' + ], + query: [ + 'rest_total_hits_as_int', + 'typed_keys' + ] + }, + 'rollup.start_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.stop_job': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + } + } + } + + /** + * Delete a rollup job. A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. 
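A minimal delete call with this client might look like the following (editor's sketch; the job id `sensor` is illustrative and `client` is assumed to be an instantiated `Client` from `@elastic/elasticsearch`): ``` const resp = await client.rollup.deleteJob({ id: 'sensor' }); ``` Such a call removes only the job itself; the rolled up data is left in place.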
This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": { "_rollup.id": "the_rollup_job_id" } } } ``` + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job | Elasticsearch API documentation} + */ + async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise + async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['rollup.delete_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'rollup.delete_job', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get rollup job information. Get the configuration, stats, and status of rollup jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs | Elasticsearch API documentation} + */ + async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise + async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['rollup.get_jobs'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_rollup/job/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_rollup/job' + } + const meta: TransportRequestMetadata = { + name: 'rollup.get_jobs', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. This API is useful because a rollup job is often configured to roll up only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: 1. Does this index have associated rollup data somewhere in the cluster? 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps | Elasticsearch API documentation} + */ + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise + async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['rollup.get_rollup_caps'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'GET' + path = `/_rollup/data/${encodeURIComponent(params.id.toString())}` + } else { + method = 'GET' + path = '/_rollup/data' + } + const meta: TransportRequestMetadata = { + name: 'rollup.get_rollup_caps', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the rollup index capabilities. Get the rollup capabilities of all jobs inside a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. 
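A minimal call with this client might look like the following (editor's sketch; the rollup index name is illustrative and `client` is an assumed instantiated `Client` from `@elastic/elasticsearch`): ``` const resp = await client.rollup.getRollupIndexCaps({ index: 'sensor_rollup' }); ```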
This API enables you to determine: * What jobs are stored in an index (or indices specified via a pattern)? * What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps | Elasticsearch API documentation} + */ + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise + async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['rollup.get_rollup_index_caps'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_rollup/data` + const meta: TransportRequestMetadata = { + name: 'rollup.get_rollup_index_caps', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job | Elasticsearch API documentation} + */ + async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise + async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['rollup.put_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'rollup.put_job', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'cron', + 'groups', + 'index_pattern', + 'metrics', + 'page_size', + 'rollup_index', + 'timeout', + 'headers' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search | Elasticsearch API documentation} + */ + async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> + async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['rollup.rollup_search'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_rollup_search` + const meta: TransportRequestMetadata = { + name: 'rollup.rollup_search', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'aggregations', + 'aggs', + 'query', + 'size', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job | Elasticsearch API documentation} + */ + async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise + async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['rollup.start_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}/_start` + const meta: TransportRequestMetadata = { + name: 'rollup.start_job', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s ``` The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. 
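The equivalent call with this client might look like the following (editor's illustrative sketch, not generated documentation; it assumes an instantiated `client` from `@elastic/elasticsearch`): ``` const resp = await client.rollup.stopJob({ id: 'sensor', wait_for_completion: true, timeout: '10s' }); ```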
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job | Elasticsearch API documentation} + */ + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise + async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['rollup.stop_job'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_rollup/job/${encodeURIComponent(params.id.toString())}/_stop` + const meta: TransportRequestMetadata = { + name: 'rollup.stop_job', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'timeout', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts new file mode 100644 index 000000000..c8d3f3a1c --- /dev/null +++ b/src/api/api/scripts_painless_execute.ts @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scripts_painless_execute: { + path: [], + body: [ + 'context', + 'context_setup', + 'script' + ], + query: [] + } +} + +/** + * Run a script. Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. Each context requires a script, but additional parameters depend on the context you're using for that script. 
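For example, a minimal script execution with this client might look like the following (editor's sketch; the node URL and script are illustrative, and top-level `await` assumes an ES module context): ``` import { Client } from '@elastic/elasticsearch'; const client = new Client({ node: 'http://localhost:9200' }); const resp = await client.scriptsPainlessExecute({ script: { source: 'params.count / 2', params: { count: 100 } } }); console.log(resp.result); ```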
+ * @see {@link https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples | Elasticsearch API documentation} + */ +export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise> +export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scripts_painless_execute + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_scripts/painless/_execute' + const meta: TransportRequestMetadata = { + name: 'scripts_painless_execute', + acceptedParams: [ + 'context', + 'context_setup', + 'script' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts new file mode 100644 index 000000000..a2f1966d6 --- /dev/null +++ b/src/api/api/scroll.ts @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scroll: { + path: [], + body: [ + 'scroll', + 'scroll_id' + ], + query: [ + 'scroll', + 'scroll_id', + 'rest_total_hits_as_int' + ] + } +} + +/** + * Run a scrolling search. IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). 
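As a quick orientation, the typical two-step flow with this client might look like the following (editor's sketch; it assumes an instantiated `client`, an illustrative index name, and that the first search returned a scroll id): ``` const first = await client.search({ index: 'my-index', scroll: '1m', query: { match_all: {} } }); const next = await client.scroll({ scroll_id: first._scroll_id!, scroll: '1m' }); ``` This flow is described in detail below.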
The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll | Elasticsearch API documentation} + */ +export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise> +export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scroll + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_search/scroll' + const meta: TransportRequestMetadata = { + name: 'scroll', + pathParts: { + scroll_id: params.scroll_id + }, + acceptedParams: [ + 'scroll_id', + 'scroll', + 'scroll_id', + 'scroll', + 'scroll_id', + 'rest_total_hits_as_int' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/search.ts b/src/api/api/search.ts new file mode 100644 index 000000000..b12a8e5a4 --- /dev/null +++ b/src/api/api/search.ts @@ -0,0 +1,275 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. 
Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search: { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'rank', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'retriever', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'project_routing', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_exclude_vectors', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'force_synthetic_source' + ] + } +} + +/** + * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. **Search slicing** When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. 
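For example, a basic search with this client might look like the following (editor's illustrative sketch, not generated documentation; the node URL and index name are hypothetical): ``` import { Client } from '@elastic/elasticsearch'; const client = new Client({ node: 'http://localhost:9200' }); const resp = await client.search({ index: 'my-index', query: { match: { title: 'elasticsearch' } }, size: 10 }); console.log(resp.hits.hits); ```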
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search | Elasticsearch API documentation} + */ +export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise> +export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + if (key === 'sort' && typeof params[key] === 'string' && params[key].includes(':')) { // eslint-disable-line + querystring[key] = params[key] + } else { + // @ts-expect-error + body[key] = params[key] + } + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_search` + } else { + method = body != null ? 
'POST' : 'GET' + path = '/_search' + } + const meta: TransportRequestMetadata = { + name: 'search', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'rank', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'retriever', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats', + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'project_routing', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_exclude_vectors', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'force_synthetic_source' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/search_application.ts b/src/api/api/search_application.ts new file mode 100644 index 000000000..e15255ef9 --- /dev/null +++ b/src/api/api/search_application.ts @@ -0,0 +1,653 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class SearchApplication { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'search_application.delete': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.delete_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.list': { + path: [], + body: [], + query: [ + 'q', + 'from', + 'size' + ] + }, + 'search_application.post_behavioral_analytics_event': { + path: [ + 'collection_name', + 'event_type' + ], + body: [ + 'payload' + ], + query: [ + 'debug' + ] + }, + 'search_application.put': { + path: [ + 'name' + ], + body: [ + 'search_application' + ], + query: [ + 'create' + ] + }, + 'search_application.put_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.render_query': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [] + }, + 'search_application.search': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [ + 'typed_keys' + ] + } + } + } + + /** + * Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete | Elasticsearch API documentation} + */ + async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['search_application.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'search_application.delete', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete a behavioral analytics collection. 
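A minimal call with this client might look like the following (editor's sketch; the collection name is illustrative and `client` is an assumed instantiated `Client` from `@elastic/elasticsearch`): ``` const resp = await client.searchApplication.deleteBehavioralAnalytics({ name: 'my-analytics' }); ```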
The associated data stream is also deleted. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics | Elasticsearch API documentation} + */ + async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise + async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['search_application.delete_behavioral_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_application/analytics/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'search_application.delete_behavioral_analytics', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get search application details. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get | Elasticsearch API documentation} + */ + async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['search_application.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'search_application.get', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get behavioral analytics collections. 
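A minimal call with this client might look like the following (editor's sketch; omitting `name` returns every collection, while passing `{ name: 'my-analytics' }` would fetch a single illustrative one): ``` const resp = await client.searchApplication.getBehavioralAnalytics(); ```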
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics | Elasticsearch API documentation}
+   */
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationGetBehavioralAnalyticsResponse>
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationGetBehavioralAnalyticsResponse, unknown>>
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationGetBehavioralAnalyticsResponse>
+  async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['search_application.get_behavioral_analytics']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    let method = ''
+    let path = ''
+    if (params.name != null) {
+      method = 'GET'
+      path = `/_application/analytics/${encodeURIComponent(params.name.toString())}`
+    } else {
+      method = 'GET'
+      path = '/_application/analytics'
+    }
+    const meta: TransportRequestMetadata = {
+      name: 'search_application.get_behavioral_analytics',
+      pathParts: {
+        name: params.name
+      },
+      acceptedParams: [
+        'name'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+   * Get search applications. Get information about search applications.
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list | Elasticsearch API documentation}
+   */
+  async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationListResponse>
+  async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationListResponse, unknown>>
+  async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationListResponse>
+  async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['search_application.list']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ??
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_application/search_application' + const meta: TransportRequestMetadata = { + name: 'search_application.list', + acceptedParams: [ + 'q', + 'from', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a behavioral analytics collection event. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event | Elasticsearch API documentation} + */ + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise> + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise + async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['search_application.post_behavioral_analytics_event'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_application/analytics/${encodeURIComponent(params.collection_name.toString())}/event/${encodeURIComponent(params.event_type.toString())}` + const meta: TransportRequestMetadata = { + name: 'search_application.post_behavioral_analytics_event', + pathParts: { + collection_name: params.collection_name, + event_type: params.event_type + }, + acceptedParams: [ + 'collection_name', + 'event_type', + 'payload', + 'debug' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a search application. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put | Elasticsearch API documentation} + */ + async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise + async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['search_application.put'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'search_application.put', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'search_application', + 'create' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a behavioral analytics collection. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics | Elasticsearch API documentation} + */ + async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise + async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['search_application.put_behavioral_analytics'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_application/analytics/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'search_application.put_behavioral_analytics', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. You must have `read` privileges on the backing alias of the search application. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query | Elasticsearch API documentation} + */ + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise + async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['search_application.render_query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}/_render_query` + const meta: TransportRequestMetadata = { + name: 'search_application.render_query', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'params' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run a search application search. 
Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application, or a default template if none is specified. Unspecified template parameters are assigned their default values if applicable.
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search | Elasticsearch API documentation}
+   */
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SearchApplicationSearchResponse<TDocument, TAggregations>, unknown>>
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise<T.SearchApplicationSearchResponse<TDocument, TAggregations>>
+  async search<TDocument = unknown, TAggregations = Record<T.AggregateName, T.AggregationsAggregate>> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this[kAcceptedParams]['search_application.search']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedBody.includes(key)) {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      } else if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
+      }
+    }
+
+    const method = body != null ? 'POST' : 'GET'
+    const path = `/_application/search_application/${encodeURIComponent(params.name.toString())}/_search`
+    const meta: TransportRequestMetadata = {
+      name: 'search_application.search',
+      pathParts: {
+        name: params.name
+      },
+      acceptedParams: [
+        'name',
+        'params',
+        'typed_keys'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+}
diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts
new file mode 100644
index 000000000..71d524f2c
--- /dev/null
+++ b/src/api/api/search_mvt.ts
@@ -0,0 +1,159 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
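
As a quick illustration of how the SearchApplication class above is consumed, here is a minimal sketch. None of it is generated code: the connection settings, the `my-app` application name, the `my-index` backing index, and the template body are all hypothetical, and the template shape follows the `put`/`search` accepted params listed in the class constructor.

import { Client } from '@elastic/elasticsearch'

// Placeholder connection details; substitute real node and auth settings.
const client = new Client({ node: 'http://localhost:9200' })

async function run (): Promise<void> {
  // Create or update a search application backed by a hypothetical index.
  await client.searchApplication.put({
    name: 'my-app',
    search_application: {
      indices: ['my-index'],
      template: {
        script: {
          source: { query: { query_string: { query: '{{query_string}}' } } },
          params: { query_string: '*' }
        }
      }
    }
  })

  // Query the application; `params` feeds the mustache template stored above.
  const result = await client.searchApplication.search({
    name: 'my-app',
    params: { query_string: 'kayaking' }
  })
  console.log(result.hits.hits)
}

run().catch(console.log)
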
+
+import {
+  Transport,
+  TransportRequestMetadata,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  search_mvt: {
+    path: [
+      'index',
+      'field',
+      'zoom',
+      'x',
+      'y'
+    ],
+    body: [
+      'aggs',
+      'buffer',
+      'exact_bounds',
+      'extent',
+      'fields',
+      'grid_agg',
+      'grid_precision',
+      'grid_type',
+      'query',
+      'runtime_mappings',
+      'size',
+      'sort',
+      'track_total_hits',
+      'with_labels'
+    ],
+    query: [
+      'exact_bounds',
+      'extent',
+      'grid_agg',
+      'grid_precision',
+      'grid_type',
+      'project_routing',
+      'size',
+      'track_total_hits',
+      'with_labels'
+    ]
+  }
+}
+
+/**
+ * Search a vector tile. Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary Mapbox vector tile. Internally, Elasticsearch translates a vector tile search API request into a search containing:
+ * * A `geo_bounding_box` query on the `<field>`. The query uses the `<zoom>/<x>/<y>` tile as a bounding box.
+ * * A `geotile_grid` or `geohex_grid` aggregation on the `<field>`. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `<zoom>/<x>/<y>` tile as a bounding box.
+ * * Optionally, a `geo_bounds` aggregation on the `<field>`. The search only includes this aggregation if the `exact_bounds` parameter is `true`.
+ * * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.
+ * The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:
+ * * A `hits` layer containing a feature for each `<field>` value matching the `geo_bounding_box` query.
+ * * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.
+ * * A meta layer containing:
+ *   * A feature containing a bounding box. By default, this is the bounding box of the tile.
+ *   * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.
+ *   * Metadata for the search.
+ * The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON.
+ * IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence.
+ * **Grid precision for geotile**
+ * For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed as follows: `<zoom> + grid_precision`. For example, if `<zoom>` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data.
+ * **Grid precision for geohex**
+ * For a `grid_agg` of `geohex`, Elasticsearch uses `<zoom>` and `grid_precision` to calculate a final precision as follows: `<zoom> + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. The following table maps the H3 resolution for each precision. For example, if `<zoom>` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `<zoom>` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3.
+ * | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio |
+ * | --------- | ---------------- | ------------- | --------------- | ----- |
+ * | 1 | 4 | 0 | 122 | 30.5 |
+ * | 2 | 16 | 0 | 122 | 7.625 |
+ * | 3 | 64 | 1 | 842 | 13.15625 |
+ * | 4 | 256 | 1 | 842 | 3.2890625 |
+ * | 5 | 1024 | 2 | 5882 | 5.744140625 |
+ * | 6 | 4096 | 2 | 5882 | 1.436035156 |
+ * | 7 | 16384 | 3 | 41162 | 2.512329102 |
+ * | 8 | 65536 | 3 | 41162 | 0.6280822754 |
+ * | 9 | 262144 | 4 | 288122 | 1.099098206 |
+ * | 10 | 1048576 | 4 | 288122 | 0.2747745514 |
+ * | 11 | 4194304 | 5 | 2016842 | 0.4808526039 |
+ * | 12 | 16777216 | 6 | 14117882 | 0.8414913416 |
+ * | 13 | 67108864 | 6 | 14117882 | 0.2103728354 |
+ * | 14 | 268435456 | 7 | 98825162 | 0.3681524172 |
+ * | 15 | 1073741824 | 8 | 691776122 | 0.644266719 |
+ * | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |
+ * | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |
+ * | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |
+ * | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |
+ * | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |
+ * | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |
+ * | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |
+ * | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |
+ * | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |
+ * | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |
+ * | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |
+ * | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |
+ * | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |
+ * | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |
+ * Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. Learn how to use the vector tile search API with practical examples in the [Vector tile search examples](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/vector-tile-search) guide.
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt | Elasticsearch API documentation} + */ +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise +export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_mvt + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = `/${encodeURIComponent(params.index.toString())}/_mvt/${encodeURIComponent(params.field.toString())}/${encodeURIComponent(params.zoom.toString())}/${encodeURIComponent(params.x.toString())}/${encodeURIComponent(params.y.toString())}` + const meta: TransportRequestMetadata = { + name: 'search_mvt', + pathParts: { + index: params.index, + field: params.field, + zoom: params.zoom, + x: params.x, + y: params.y + }, + acceptedParams: [ + 'index', + 'field', + 'zoom', + 'x', + 'y', + 'aggs', + 'buffer', + 'exact_bounds', + 'extent', + 'fields', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'query', + 'runtime_mappings', + 'size', + 'sort', + 'track_total_hits', + 'with_labels', + 'exact_bounds', + 'extent', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'project_routing', + 'size', + 'track_total_hits', + 'with_labels' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts new file mode 100644 index 000000000..674f63dee --- /dev/null +++ b/src/api/api/search_shards.ts @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
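
As an illustration of the SearchMvtApi function above, a minimal sketch follows. The connection settings are placeholders; the `museums` index, `location` field, and tile coordinates mirror the example in the API documentation. Note that the response body is a binary Mapbox vector tile rather than JSON.

import { writeFileSync } from 'node:fs'
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details.
const client = new Client({ node: 'http://localhost:9200' })

async function run (): Promise<void> {
  // Request tile zoom=13, x=4207, y=2692 for the `location` field of a
  // hypothetical `museums` index; the result is a binary Mapbox vector tile.
  const tile = await client.searchMvt({
    index: 'museums',
    field: 'location',
    zoom: 13,
    x: 4207,
    y: 2692,
    grid_precision: 2,
    grid_type: 'point'
  })
  writeFileSync('tile.mvt', Buffer.from(tile))
}

run().catch(console.log)
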
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + search_shards: { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'preference', + 'routing' + ] + } +} + +/** + * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the `indices` section. If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards | Elasticsearch API documentation} + */ +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise +export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.search_shards + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_search_shards` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_search_shards' + } + const meta: TransportRequestMetadata = { + name: 'search_shards', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'preference', + 'routing' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts new file mode 100644 index 000000000..99e7fd0ca --- /dev/null +++ b/src/api/api/search_template.ts @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. 
Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search_template: { + path: [ + 'index' + ], + body: [ + 'explain', + 'id', + 'params', + 'profile', + 'source' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'preference', + 'profile', + 'project_routing', + 'routing', + 'scroll', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } +} + +/** + * Run a search with a search template. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template | Elasticsearch API documentation} + */ +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise> +export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_template + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_search/template` + } else { + method = body != null ? 
'POST' : 'GET' + path = '/_search/template' + } + const meta: TransportRequestMetadata = { + name: 'search_template', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'explain', + 'id', + 'params', + 'profile', + 'source', + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'preference', + 'profile', + 'project_routing', + 'routing', + 'scroll', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts new file mode 100644 index 000000000..0486d7ec1 --- /dev/null +++ b/src/api/api/searchable_snapshots.ts @@ -0,0 +1,329 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class SearchableSnapshots { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'searchable_snapshots.cache_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'searchable_snapshots.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'allow_no_indices', + 'ignore_unavailable' + ] + }, + 'searchable_snapshots.mount': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'index', + 'renamed_index', + 'index_settings', + 'ignore_index_settings' + ], + query: [ + 'master_timeout', + 'wait_for_completion', + 'storage' + ] + }, + 'searchable_snapshots.stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'level' + ] + } + } + } + + /** + * Get cache statistics. Get statistics about the shared cache for partially mounted indices. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats | Elasticsearch API documentation} + */ + async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise + async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['searchable_snapshots.cache_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null) { + method = 'GET' + path = `/_searchable_snapshots/${encodeURIComponent(params.node_id.toString())}/cache/stats` + } else { + method = 'GET' + path = '/_searchable_snapshots/cache/stats' + } + const meta: TransportRequestMetadata = { + name: 'searchable_snapshots.cache_stats', + pathParts: { + node_id: params.node_id + }, + acceptedParams: [ + 'node_id', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache | Elasticsearch API documentation} + */ + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise + async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['searchable_snapshots.clear_cache'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'POST' + path = `/${encodeURIComponent(params.index.toString())}/_searchable_snapshots/cache/clear` + } else { + method = 'POST' + path = '/_searchable_snapshots/cache/clear' + } + const meta: TransportRequestMetadata = { + name: 'searchable_snapshots.clear_cache', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'expand_wildcards', + 'allow_no_indices', + 'ignore_unavailable' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount | Elasticsearch API documentation} + */ + async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> + async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise + async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['searchable_snapshots.mount'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_mount` + const meta: TransportRequestMetadata = { + name: 'searchable_snapshots.mount', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'index', + 'renamed_index', + 'index_settings', + 'ignore_index_settings', + 'master_timeout', + 'wait_for_completion', + 'storage' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get searchable snapshot statistics. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats | Elasticsearch API documentation} + */ + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['searchable_snapshots.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_searchable_snapshots/stats` + } else { + method = 'GET' + path = '/_searchable_snapshots/stats' + } + const meta: TransportRequestMetadata = { + name: 'searchable_snapshots.stats', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'level' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/security.ts b/src/api/api/security.ts new file mode 100644 index 000000000..bd7ee00fa --- /dev/null +++ b/src/api/api/security.ts @@ -0,0 +1,4337 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
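
As an illustration of the SearchableSnapshots class completed above, a minimal sketch follows. The connection settings and the repository, snapshot, and index names are hypothetical, and the snapshot is assumed not to be managed by ILM, per the warning in the mount method's doc comment.

import { Client } from '@elastic/elasticsearch'

// Placeholder connection details.
const client = new Client({ node: 'http://localhost:9200' })

async function run (): Promise<void> {
  // Mount an index from a (non-ILM-managed) snapshot as a partially
  // mounted, shared-cache searchable snapshot index.
  await client.searchableSnapshots.mount({
    repository: 'my_repository',
    snapshot: 'my_snapshot',
    index: 'my_docs',
    renamed_index: 'docs',
    wait_for_completion: true,
    storage: 'shared_cache'
  })

  // Inspect searchable snapshot statistics for the mounted index.
  const stats = await client.searchableSnapshots.stats({ index: 'docs' })
  console.log(stats)
}

run().catch(console.log)
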
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Security { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'security.activate_user_profile': { + path: [], + body: [ + 'access_token', + 'grant_type', + 'password', + 'username' + ], + query: [] + }, + 'security.authenticate': { + path: [], + body: [], + query: [] + }, + 'security.bulk_delete_role': { + path: [], + body: [ + 'names' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_put_role': { + path: [], + body: [ + 'roles' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_update_api_keys': { + path: [], + body: [ + 'expiration', + 'ids', + 'metadata', + 'role_descriptors' + ], + query: [] + }, + 'security.change_password': { + path: [ + 'username' + ], + body: [ + 'password', + 'password_hash' + ], + query: [ + 'refresh' + ] + }, + 'security.clear_api_key_cache': { + path: [ + 'ids' + ], + body: [], + query: [] + }, + 'security.clear_cached_privileges': { + path: [ + 'application' + ], + body: [], + query: [] + }, + 'security.clear_cached_realms': { + path: [ + 'realms' + ], + body: [], + query: [ + 'usernames' + ] + }, + 'security.clear_cached_roles': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.clear_cached_service_tokens': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [] + }, + 'security.create_api_key': { + path: [], + body: [ + 'expiration', + 'name', + 'role_descriptors', + 'metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.create_cross_cluster_api_key': { + path: [], + body: [ + 'access', + 'expiration', + 'metadata', + 'name', + 'certificate_identity' + ], + query: [] + }, + 'security.create_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delegate_pki': { + path: [], + body: [ + 'x509_certificate_chain' + ], + query: [] + }, + 'security.delete_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enroll_kibana': { + path: [], + body: [], + query: [] + }, + 'security.enroll_node': { + path: [], + body: [], + query: [] + }, + 'security.get_api_key': { + path: [], + body: [], + query: [ + 'id', + 'name', + 
'owner', + 'realm_name', + 'username', + 'with_limited_by', + 'active_only', + 'with_profile_uid' + ] + }, + 'security.get_builtin_privileges': { + path: [], + body: [], + query: [] + }, + 'security.get_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [] + }, + 'security.get_role': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_service_accounts': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_service_credentials': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_settings': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'security.get_stats': { + path: [], + body: [], + query: [] + }, + 'security.get_token': { + path: [], + body: [ + 'grant_type', + 'scope', + 'password', + 'kerberos_ticket', + 'refresh_token', + 'username' + ], + query: [] + }, + 'security.get_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'with_profile_uid' + ] + }, + 'security.get_user_privileges': { + path: [], + body: [], + query: [] + }, + 'security.get_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'data' + ] + }, + 'security.grant_api_key': { + path: [], + body: [ + 'api_key', + 'grant_type', + 'access_token', + 'username', + 'password', + 'run_as' + ], + query: [ + 'refresh' + ] + }, + 'security.has_privileges': { + path: [ + 'user' + ], + body: [ + 'application', + 'cluster', + 'index' + ], + query: [] + }, + 'security.has_privileges_user_profile': { + path: [], + body: [ + 'uids', + 'privileges' + ], + query: [] + }, + 'security.invalidate_api_key': { + path: [], + body: [ + 'id', + 'ids', + 'name', + 'owner', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.invalidate_token': { + path: [], + body: [ + 'token', + 'refresh_token', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.oidc_authenticate': { + path: [], + body: [ + 'nonce', + 'realm', + 'redirect_uri', + 'state' + ], + query: [] + }, + 'security.oidc_logout': { + path: [], + body: [ + 'token', + 'refresh_token' + ], + query: [] + }, + 'security.oidc_prepare_authentication': { + path: [], + body: [ + 'iss', + 'login_hint', + 'nonce', + 'realm', + 'state' + ], + query: [] + }, + 'security.put_privileges': { + path: [], + body: [ + 'privileges' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role': { + path: [ + 'name' + ], + body: [ + 'applications', + 'cluster', + 'global', + 'indices', + 'remote_indices', + 'remote_cluster', + 'metadata', + 'run_as', + 'description', + 'transient_metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role_mapping': { + path: [ + 'name' + ], + body: [ + 'enabled', + 'metadata', + 'roles', + 'role_templates', + 'rules', + 'run_as' + ], + query: [ + 'refresh' + ] + }, + 'security.put_user': { + path: [], + body: [ + 'username', + 'email', + 'full_name', + 'metadata', + 'password', + 'password_hash', + 'roles', + 'enabled' + ], + query: [ + 'refresh' + ] + }, + 'security.query_api_keys': { + path: [], + body: [ + 'aggregations', + 'aggs', + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [ + 'with_limited_by', + 'with_profile_uid', + 'typed_keys' + ] + }, + 'security.query_role': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [] + }, + 'security.query_user': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + 
query: [ + 'with_profile_uid' + ] + }, + 'security.saml_authenticate': { + path: [], + body: [ + 'content', + 'ids', + 'realm' + ], + query: [] + }, + 'security.saml_complete_logout': { + path: [], + body: [ + 'realm', + 'ids', + 'query_string', + 'content' + ], + query: [] + }, + 'security.saml_invalidate': { + path: [], + body: [ + 'acs', + 'query_string', + 'realm' + ], + query: [] + }, + 'security.saml_logout': { + path: [], + body: [ + 'token', + 'refresh_token' + ], + query: [] + }, + 'security.saml_prepare_authentication': { + path: [], + body: [ + 'acs', + 'realm', + 'relay_state' + ], + query: [] + }, + 'security.saml_service_provider_metadata': { + path: [ + 'realm_name' + ], + body: [], + query: [] + }, + 'security.suggest_user_profiles': { + path: [], + body: [ + 'name', + 'size', + 'data', + 'hint' + ], + query: [ + 'data' + ] + }, + 'security.update_api_key': { + path: [ + 'id' + ], + body: [ + 'role_descriptors', + 'metadata', + 'expiration' + ], + query: [] + }, + 'security.update_cross_cluster_api_key': { + path: [ + 'id' + ], + body: [ + 'access', + 'expiration', + 'metadata', + 'certificate_identity' + ], + query: [] + }, + 'security.update_settings': { + path: [], + body: [ + 'security', + 'security-profile', + 'security-tokens' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'security.update_user_profile_data': { + path: [ + 'uid' + ], + body: [ + 'labels', + 'data' + ], + query: [ + 'if_seq_no', + 'if_primary_term', + 'refresh' + ] + } + } + } + + /** + * Activate a user profile. Create or update a user profile on behalf of another user. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. Elastic reserves the right to change or remove this feature in future releases without prior notice. This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name,` `roles`, and the authentication realm. For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile | Elasticsearch API documentation} + */ + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise + async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.activate_user_profile'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/profile/_activate' + const meta: TransportRequestMetadata = { + name: 'security.activate_user_profile', + acceptedParams: [ + 'access_token', + 'grant_type', + 'password', + 'username' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Authenticate a user. Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate | Elasticsearch API documentation} + */ + async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise + async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.authenticate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/_authenticate' + const meta: TransportRequestMetadata = { + name: 'security.authenticate', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Bulk delete roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role | Elasticsearch API documentation} + */ + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise + async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.bulk_delete_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'DELETE' + const path = '/_security/role' + const meta: TransportRequestMetadata = { + name: 'security.bulk_delete_role', + acceptedParams: [ + 'names', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Bulk create or update roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role | Elasticsearch API documentation} + */ + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise + async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.bulk_put_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/role' + const meta: TransportRequestMetadata = { + name: 'security.bulk_put_role', + acceptedParams: [ + 'roles', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Bulk update API keys. Update the attributes for multiple API keys. IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. It is not possible to update expired or invalidated API keys. This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. 
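+ *
+ * An illustrative sketch only: the API key IDs shown are placeholders and `client` is an assumed, configured client instance.
+ * @example
+ * const res = await client.security.bulkUpdateApiKeys({
+ *   ids: ['api-key-id-1', 'api-key-id-2'],
+ *   metadata: { environment: 'testing' }
+ * })
+ * console.log(res) // reports updated keys, no-op keys, and any per-key errors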
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys | Elasticsearch API documentation} + */ + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise + async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.bulk_update_api_keys'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/api_key/_bulk_update' + const meta: TransportRequestMetadata = { + name: 'security.bulk_update_api_keys', + acceptedParams: [ + 'expiration', + 'ids', + 'metadata', + 'role_descriptors' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Change passwords. Change the passwords of users in the native realm and built-in users. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password | Elasticsearch API documentation} + */ + async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> + async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise + async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.change_password'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.username != null) { + method = 'PUT' + path = `/_security/user/${encodeURIComponent(params.username.toString())}/_password` + } else { + method = 'PUT' + path = '/_security/user/_password' + } + const meta: TransportRequestMetadata = { + name: 'security.change_password', + pathParts: { + username: params.username + }, + acceptedParams: [ + 'username', + 'password', + 'password_hash', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clear the API key cache. Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache | Elasticsearch API documentation} + */ + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise + async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.clear_api_key_cache'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_security/api_key/${encodeURIComponent(params.ids.toString())}/_clear_cache` + const meta: TransportRequestMetadata = { + name: 'security.clear_api_key_cache', + pathParts: { + ids: params.ids + }, + acceptedParams: [ + 'ids' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clear the privileges cache. Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated. 
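+ *
+ * For illustration, assuming a configured `client` and a placeholder application name:
+ * @example
+ * await client.security.clearCachedPrivileges({ application: 'myapp' })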
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges | Elasticsearch API documentation} + */ + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise + async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.clear_cached_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_security/privilege/${encodeURIComponent(params.application.toString())}/_clear_cache` + const meta: TransportRequestMetadata = { + name: 'security.clear_cached_privileges', + pathParts: { + application: params.application + }, + acceptedParams: [ + 'application' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clear the user cache. Evict users from the user cache. You can completely clear the cache or evict specific users. User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms | Elasticsearch API documentation} + */ + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise + async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.clear_cached_realms'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_security/realm/${encodeURIComponent(params.realms.toString())}/_clear_cache` + const meta: TransportRequestMetadata = { + name: 'security.clear_cached_realms', + pathParts: { + realms: params.realms + }, + acceptedParams: [ + 'realms', + 'usernames' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clear the roles cache. Evict roles from the native role cache. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles | Elasticsearch API documentation} + */ + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise + async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.clear_cached_roles'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_security/role/${encodeURIComponent(params.name.toString())}/_clear_cache` + const meta: TransportRequestMetadata = { + name: 'security.clear_cached_roles', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clear service account token caches. Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. This API clears matching entries from both caches. The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. 
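+ *
+ * A sketch under assumptions: `client` is a configured client instance and the token name is a placeholder.
+ * @example
+ * await client.security.clearCachedServiceTokens({
+ *   namespace: 'elastic',
+ *   service: 'fleet-server',
+ *   name: 'my-token' // a wildcard ('*') should also work here to evict all cached tokens
+ * })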
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens | Elasticsearch API documentation} + */ + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise + async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.clear_cached_service_tokens'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token/${encodeURIComponent(params.name.toString())}/_clear_cache` + const meta: TransportRequestMetadata = { + name: 'security.clear_cached_service_tokens', + pathParts: { + namespace: params.namespace, + service: params.service, + name: params.name + }, + acceptedParams: [ + 'namespace', + 'service', + 'name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an API key. Create an API key for access without requiring basic authentication. IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation. 
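+ *
+ * A minimal usage sketch, for illustration only: `client` is assumed to be a configured client instance and the key name is a placeholder.
+ * @example
+ * const res = await client.security.createApiKey({
+ *   name: 'my-ingest-key',
+ *   expiration: '7d', // omit for a key that never expires
+ *   role_descriptors: {} // an empty object derives the key's privileges from the owner user
+ * })
+ * console.log(res.encoded) // ready for use in an "Authorization: ApiKey <encoded>" header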
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key | Elasticsearch API documentation} + */ + async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise + async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.create_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = '/_security/api_key' + const meta: TransportRequestMetadata = { + name: 'security.create_api_key', + acceptedParams: [ + 'expiration', + 'name', + 'role_descriptors', + 'metadata', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a cross-cluster API key. Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. 
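+ *
+ * An illustrative sketch: the key name and index pattern are placeholders, and `client` is an assumed, configured client instance.
+ * @example
+ * const res = await client.security.createCrossClusterApiKey({
+ *   name: 'remote-search-key',
+ *   access: { search: [{ names: ['logs-*'] }] }
+ * })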
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key | Elasticsearch API documentation} + */ + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise + async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.create_cross_cluster_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/cross_cluster/api_key' + const meta: TransportRequestMetadata = { + name: 'security.create_cross_cluster_api_key', + acceptedParams: [ + 'access', + 'expiration', + 'metadata', + 'name', + 'certificate_identity' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a service account token. Create a service account token for access without requiring basic authentication. NOTE: Service account tokens never expire. You must actively delete them if they are no longer needed. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token | Elasticsearch API documentation} + */ + async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise + async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.create_service_token'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.namespace != null && params.service != null && params.name != null) { + method = 'PUT' + path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token/${encodeURIComponent(params.name.toString())}` + } else { + method = 'POST' + path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token` + } + const meta: TransportRequestMetadata = { + name: 'security.create_service_token', + pathParts: { + namespace: params.namespace, + service: params.service, + name: params.name + }, + acceptedParams: [ + 'namespace', + 'service', + 'name', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delegate PKI authentication. This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm. This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch. IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki | Elasticsearch API documentation} + */ + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise + async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.delegate_pki'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/delegate_pki' + const meta: TransportRequestMetadata = { + name: 'security.delegate_pki', + acceptedParams: [ + 'x509_certificate_chain' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges | Elasticsearch API documentation} + */ + async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise + async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.delete_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_security/privilege/${encodeURIComponent(params.application.toString())}/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.delete_privileges', + pathParts: { + application: params.application, + name: params.name + }, + acceptedParams: [ + 'application', + 'name', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete roles. Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files. 
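+ *
+ * A minimal usage sketch, for illustration only: `client` is assumed to be a configured client instance and the role name is a placeholder.
+ * @example
+ * const res = await client.security.deleteRole({ name: 'my_admin_role' })
+ * console.log(res.found) // false if no matching role existed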
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role | Elasticsearch API documentation} + */ + async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise + async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.delete_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_security/role/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.delete_role', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping | Elasticsearch API documentation} + */ + async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise + async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.delete_role_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_security/role_mapping/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.delete_role_mapping', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete service account tokens. Delete service account tokens for a service in a specified namespace. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token | Elasticsearch API documentation} + */ + async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise + async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.delete_service_token'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential/token/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.delete_service_token', + pathParts: { + namespace: params.namespace, + service: params.service, + name: params.name + }, + acceptedParams: [ + 'namespace', + 'service', + 'name', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete users. Delete users from the native realm. 
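+ *
+ * A brief sketch, illustrative only: the username is a placeholder and `client` is an assumed, configured client instance.
+ * @example
+ * const res = await client.security.deleteUser({ username: 'jacknich', refresh: 'wait_for' })
+ * console.log(res.found) // false if the user did not exist in the native realm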
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user | Elasticsearch API documentation} + */ + async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise + async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.delete_user'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_security/user/${encodeURIComponent(params.username.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.delete_user', + pathParts: { + username: params.username + }, + acceptedParams: [ + 'username', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Disable users. Disable users in the native realm. By default, when you create users, they are enabled. You can use this API to revoke a user's access to Elasticsearch. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user | Elasticsearch API documentation} + */ + async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise + async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.disable_user'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_security/user/${encodeURIComponent(params.username.toString())}/_disable` + const meta: TransportRequestMetadata = { + name: 'security.disable_user', + pathParts: { + username: params.username + }, + acceptedParams: [ + 'username', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Disable a user profile. Disable user profiles so that they are not visible in user profile searches. 
NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches. To re-enable a disabled user profile, use the enable user profile API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile | Elasticsearch API documentation} + */ + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise + async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.disable_user_profile'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}/_disable` + const meta: TransportRequestMetadata = { + name: 'security.disable_user_profile', + pathParts: { + uid: params.uid + }, + acceptedParams: [ + 'uid', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Enable users. Enable users in the native realm. By default, when you create users, they are enabled. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user | Elasticsearch API documentation} + */ + async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise + async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.enable_user'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_security/user/${encodeURIComponent(params.username.toString())}/_enable` + const meta: TransportRequestMetadata = { + name: 'security.enable_user', + pathParts: { + username: params.username + }, + acceptedParams: [ + 'username', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Enable a user profile. Enable user profiles to make them visible in user profile searches. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, it's automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile | Elasticsearch API documentation} + */ + async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise + async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.enable_user_profile'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}/_enable` + const meta: TransportRequestMetadata = { + name: 'security.enable_user_profile', + pathParts: { + uid: params.uid + }, + acceptedParams: [ + 'uid', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. NOTE: This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. 
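+ *
+ * A minimal usage sketch, for illustration only, assuming `client` is a configured client instance:
+ * @example
+ * const res = await client.security.enrollKibana()
+ * // the response carries the service token and CA material Kibana needs to connect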
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana | Elasticsearch API documentation} + */ + async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise> + async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise + async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.enroll_kibana'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/enroll/kibana' + const meta: TransportRequestMetadata = { + name: 'security.enroll_kibana', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled. The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node | Elasticsearch API documentation} + */ + async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise + async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.enroll_node'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/enroll/node' + const meta: TransportRequestMetadata = { + name: 'security.enroll_node', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get API key information. 
Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key | Elasticsearch API documentation} + */ + async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise + async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/api_key' + const meta: TransportRequestMetadata = { + name: 'security.get_api_key', + acceptedParams: [ + 'id', + 'name', + 'owner', + 'realm_name', + 'username', + 'with_limited_by', + 'active_only', + 'with_profile_uid' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges | Elasticsearch API documentation} + */ + async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise + async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_builtin_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/privilege/_builtin' + const meta: TransportRequestMetadata = { + name: 'security.get_builtin_privileges', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get application privileges. To use this API, you must have one of the following privileges: * The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges | Elasticsearch API documentation} + */ + async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise + async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.application != null && params.name != null) { + method = 'GET' + path = `/_security/privilege/${encodeURIComponent(params.application.toString())}/${encodeURIComponent(params.name.toString())}` + } else if (params.application != null) { + method = 'GET' + path = `/_security/privilege/${encodeURIComponent(params.application.toString())}` + } else { + method = 'GET' + path = '/_security/privilege' + } + const meta: TransportRequestMetadata = { + name: 'security.get_privileges', + pathParts: { + application: params.application, + name: params.name + }, + acceptedParams: [ + 'application', + 'name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get roles. Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. 
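+ *
+ * A short sketch, illustrative only: the role name is a placeholder and `client` is an assumed, configured client instance.
+ * @example
+ * const res = await client.security.getRole({ name: 'my_admin_role' })
+ * // omit `name` to retrieve all roles from the native realm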
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role | Elasticsearch API documentation} + */ + async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise + async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_security/role/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_security/role' + } + const meta: TransportRequestMetadata = { + name: 'security.get_role', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping | Elasticsearch API documentation} + */ + async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise + async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_role_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_security/role_mapping/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_security/role_mapping' + } + const meta: TransportRequestMetadata = { + name: 'security.get_role_mapping', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get service accounts. Get a list of service accounts that match the provided path parameters. NOTE: Currently, only the `elastic/fleet-server` service account is available. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts | Elasticsearch API documentation} + */ + async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise + async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_service_accounts'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.namespace != null && params.service != null) { + method = 'GET' + path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}` + } else if (params.namespace != null) { + method = 'GET' + path = `/_security/service/${encodeURIComponent(params.namespace.toString())}` + } else { + method = 'GET' + path = '/_security/service' + } + const meta: TransportRequestMetadata = { + name: 'security.get_service_accounts', + pathParts: { + namespace: params.namespace, + service: params.service + }, + acceptedParams: [ + 'namespace', + 'service' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get service account credentials. To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. 
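A usage sketch for the `getRoleMapping` and `getServiceAccounts` implementations above; the connection details are placeholders, and `elastic/fleet-server` is the one service account the surrounding docs mention:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

async function run (): Promise<void> {
  // Without a name, all role mappings are returned
  const mappings = await client.security.getRoleMapping()
  console.log(Object.keys(mappings))

  // Narrowing by namespace and service returns a single account
  const fleet = await client.security.getServiceAccounts({ namespace: 'elastic', service: 'fleet-server' })
  console.log(fleet)
}

run().catch(console.error)
```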
Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials | Elasticsearch API documentation} + */ + async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise + async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_service_credentials'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_security/service/${encodeURIComponent(params.namespace.toString())}/${encodeURIComponent(params.service.toString())}/credential` + const meta: TransportRequestMetadata = { + name: 'security.get_service_credentials', + pathParts: { + namespace: params.namespace, + service: params.service + }, + acceptedParams: [ + 'namespace', + 'service' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get security index settings. Get the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of the index settings (those that are user-configurable) will be shown. This includes: * `index.auto_expand_replicas` * `index.number_of_replicas` + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings | Elasticsearch API documentation} + */ + async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ??
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/settings' + const meta: TransportRequestMetadata = { + name: 'security.get_settings', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get security stats. Gather security usage statistics from all node(s) within the cluster. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-stats | Elasticsearch API documentation} + */ + async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptions): Promise + async getStats (this: That, params?: T.SecurityGetStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/stats' + const meta: TransportRequestMetadata = { + name: 'security.get_stats', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a token. Create a bearer token for access without requiring basic authentication. The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` setting. If you want to invalidate a token immediately, you can do so by using the invalidate token API. 
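Before the `getToken` implementation below, a minimal sketch of the password grant just described; the credentials are placeholders, and the cluster is assumed to have the token service enabled (TLS on the HTTP interface):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { username: 'elastic', password: '<password>' } })

async function run (): Promise<void> {
  // Exchange basic credentials for a bearer token
  const token = await client.security.getToken({
    grant_type: 'password',
    username: 'elastic',
    password: '<password>'
  })
  console.log(token.access_token, token.expires_in, token.refresh_token)
}

run().catch(console.error)
```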
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token | Elasticsearch API documentation} + */ + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise + async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.get_token'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/oauth2/token' + const meta: TransportRequestMetadata = { + name: 'security.get_token', + acceptedParams: [ + 'grant_type', + 'scope', + 'password', + 'kerberos_ticket', + 'refresh_token', + 'username' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get users. Get information about users in the native realm and built-in users. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user | Elasticsearch API documentation} + */ + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise + async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_user'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.username != null) { + method = 'GET' + path = `/_security/user/${encodeURIComponent(params.username.toString())}` + } else { + method = 'GET' + path = '/_security/user' + } + const meta: TransportRequestMetadata = { + name: 'security.get_user', + pathParts: { + username: params.username + }, + acceptedParams: [ + 'username', + 'with_profile_uid' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get user privileges. Get the security privileges for the logged in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges | Elasticsearch API documentation} + */ + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise + async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_user_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_security/user/_privileges' + const meta: TransportRequestMetadata = { + name: 'security.get_user_privileges', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a user profile. Get a user's profile using the unique profile ID. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. 
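A short sketch of the `getUser` and `getUserPrivileges` calls implemented above; the connection details are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

async function run (): Promise<void> {
  // Omitting `username` returns all native and built-in users
  const users = await client.security.getUser({ with_profile_uid: true })
  console.log(Object.keys(users))

  // Privileges of the user behind this client's credentials
  const own = await client.security.getUserPrivileges()
  console.log(own.cluster, own.indices)
}

run().catch(console.error)
```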
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile | Elasticsearch API documentation} + */ + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise + async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.get_user_profile'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.get_user_profile', + pathParts: { + uid: params.uid + }, + acceptedParams: [ + 'uid', + 'data' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user's credentials. The supported user authentication credential types are: * username and password * Elasticsearch access tokens * JWTs The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys.
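Before the `grantApiKey` implementation that follows, a minimal sketch of the grant flow just described; every credential shown is a placeholder, and the granting user is assumed to hold the `grant_api_key` privilege:

```ts
import { Client } from '@elastic/elasticsearch'

// The granting client authenticates as the privileged caller
const client = new Client({ node: 'https://localhost:9200', auth: { username: 'admin-user', password: '<password>' } })

async function run (): Promise<void> {
  // Create a key on behalf of 'jdoe', proving jdoe's own credentials
  const granted = await client.security.grantApiKey({
    grant_type: 'password',
    username: 'jdoe',
    password: '<jdoe-password>',
    api_key: { name: 'jdoe-key', expiration: '7d' }
  })
  console.log(granted.id, granted.api_key)
}

run().catch(console.error)
```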
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key | Elasticsearch API documentation} + */ + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise + async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.grant_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/api_key/grant' + const meta: TransportRequestMetadata = { + name: 'security.grant_api_key', + acceptedParams: [ + 'api_key', + 'grant_type', + 'access_token', + 'username', + 'password', + 'run_as', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Check user privileges. Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges | Elasticsearch API documentation} + */ + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise + async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.has_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.user != null) { + method = body != null ? 'POST' : 'GET' + path = `/_security/user/${encodeURIComponent(params.user.toString())}/_has_privileges` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_security/user/_has_privileges' + } + const meta: TransportRequestMetadata = { + name: 'security.has_privileges', + pathParts: { + user: params.user + }, + acceptedParams: [ + 'user', + 'application', + 'cluster', + 'index' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile | Elasticsearch API documentation} + */ + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise + async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.has_privileges_user_profile'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_security/profile/_has_privileges' + const meta: TransportRequestMetadata = { + name: 'security.has_privileges_user_profile', + acceptedParams: [ + 'uids', + 'privileges' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Invalidate API keys. 
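A usage sketch for the `hasPrivileges` implementation above, assuming the caller checks their own privileges; the cluster privilege and index names are illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

async function run (): Promise<void> {
  const check = await client.security.hasPrivileges({
    cluster: ['monitor'],
    index: [{ names: ['logs-*'], privileges: ['read', 'write'] }]
  })
  console.log(check.has_all_requested, check.index)
}

run().catch(console.error)
```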
This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user's identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key | Elasticsearch API documentation} + */ + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise + async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.invalidate_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'DELETE' + const path = '/_security/api_key' + const meta: TransportRequestMetadata = { + name: 'security.invalidate_api_key', + acceptedParams: [ + 'id', + 'ids', + 'name', + 'owner', + 'realm_name', + 'username' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Invalidate a token. The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. 
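A minimal sketch of the `invalidateApiKey` implementation above, using the `owner=true` form called out in the description; the connection details are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { username: 'jdoe', password: '<password>' } })

async function run (): Promise<void> {
  // With `manage_own_api_key`, owner: true invalidates every key the caller owns
  const result = await client.security.invalidateApiKey({ owner: true })
  console.log(result.invalidated_api_keys, result.error_count)
}

run().catch(console.error)
```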
NOTE: While all parameters are optional, at least one of them is required. More specifically, either the `token` or the `refresh_token` parameter is required. If neither of these is specified, then `realm_name` and/or `username` must be specified. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token | Elasticsearch API documentation} + */ + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise + async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.invalidate_token'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'DELETE' + const path = '/_security/oauth2/token' + const meta: TransportRequestMetadata = { + name: 'security.invalidate_token', + acceptedParams: [ + 'token', + 'refresh_token', + 'realm_name', + 'username' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Authenticate OpenID Connect. Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
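A sketch pairing the `invalidateToken` implementation above with the get token API it references; all credentials are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { username: 'elastic', password: '<password>' } })

async function run (): Promise<void> {
  const token = await client.security.getToken({ grant_type: 'password', username: 'elastic', password: '<password>' })

  // Revoke the freshly issued access token immediately
  const result = await client.security.invalidateToken({ token: token.access_token })
  console.log(result.invalidated_tokens)
}

run().catch(console.error)
```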
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate | Elasticsearch API documentation} + */ + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise + async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.oidc_authenticate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/oidc/authenticate' + const meta: TransportRequestMetadata = { + name: 'security.oidc_authenticate', + acceptedParams: [ + 'nonce', + 'realm', + 'redirect_uri', + 'state' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Logout of OpenID Connect. Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout | Elasticsearch API documentation} + */ + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise + async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.oidc_logout'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/oidc/logout' + const meta: TransportRequestMetadata = { + name: 'security.oidc_logout', + acceptedParams: [ + 'token', + 'refresh_token' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication | Elasticsearch API documentation} + */ + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise + async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.oidc_prepare_authentication'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/oidc/prepare' + const meta: TransportRequestMetadata = { + name: 'security.oidc_prepare_authentication', + acceptedParams: [ + 'iss', + 'login_hint', + 'nonce', + 'realm', + 'state' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. Application names are formed from a prefix and an optional suffix, which together must conform to the following rules: * The prefix must begin with a lowercase ASCII letter. * The prefix must contain only ASCII letters or digits. * The prefix must be at least 3 characters long. * If the suffix exists, it must begin with either a dash `-` or `_`. * The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`. * No part of the name can contain whitespace. Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges | Elasticsearch API documentation} + */ + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise + async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.put_privileges'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = '/_security/privilege' + const meta: TransportRequestMetadata = { + name: 'security.put_privileges', + acceptedParams: [ + 'privileges', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update roles. The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless.
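A usage sketch for the `putPrivileges` implementation above; `myapp` and the action names are illustrative but follow the naming rules from the description (each action name contains `:` or `/`):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

async function run (): Promise<void> {
  await client.security.putPrivileges({
    privileges: {
      myapp: {
        read: { actions: ['data:read/*', 'action:login'] },
        write: { actions: ['data:write/*'] }
      }
    }
  })
}

run().catch(console.error)
```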
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role | Elasticsearch API documentation} + */ + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise + async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.put_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_security/role/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.put_role', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'applications', + 'cluster', + 'global', + 'indices', + 'remote_indices', + 'remote_cluster', + 'metadata', + 'run_as', + 'description', + 'transient_metadata', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update role mappings. Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. **Role templates** The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. The `roles` field is used for this purpose. For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The `role_templates` field is used for this purpose. NOTE: To use role templates successfully, the relevant scripting feature must be enabled. Otherwise, all attempts to create a role mapping with role templates fail. All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. 
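A minimal sketch of the `putRole` implementation above; the role, privilege, and index names are illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

async function run (): Promise<void> {
  await client.security.putRole({
    name: 'logs_reader',
    cluster: ['monitor'],
    indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }],
    metadata: { version: 1 }
  })
}

run().catch(console.error)
```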
By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping | Elasticsearch API documentation} + */ + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise + async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.put_role_mapping'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_security/role_mapping/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.put_role_mapping', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'name', + 'enabled', + 'metadata', + 'roles', + 'role_templates', + 'rules', + 'run_as', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update users. Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user's password without updating any other fields, use the change password API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user | Elasticsearch API documentation} + */ + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise + async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.put_user'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_security/user/${encodeURIComponent(params.username.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.put_user', + pathParts: { + username: params.username + }, + acceptedParams: [ + 'username', + 'username', + 'email', + 'full_name', + 'metadata', + 'password', + 'password_hash', + 'roles', + 'enabled', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. Refer to the linked documentation for examples of how to find API keys: + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys | Elasticsearch API documentation} + */ + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise + async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.query_api_keys'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 
'POST' : 'GET' + const path = '/_security/_query/api_key' + const meta: TransportRequestMetadata = { + name: 'security.query_api_keys', + acceptedParams: [ + 'aggregations', + 'aggs', + 'query', + 'from', + 'sort', + 'size', + 'search_after', + 'with_limited_by', + 'with_profile_uid', + 'typed_keys' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Find roles with a query. Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role | Elasticsearch API documentation} + */ + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise + async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.query_role'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_security/_query/role' + const meta: TransportRequestMetadata = { + name: 'security.query_role', + acceptedParams: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Find users with a query. Get information for users in a paginated manner. You can optionally filter the results with a query. NOTE: As opposed to the get user API, built-in users are excluded from the result. This API is only for native users. 
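A usage sketch for the `queryApiKeys` and `queryRole` implementations above, filtering to keys that are still valid; the query fields and page size are illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

async function run (): Promise<void> {
  const page = await client.security.queryApiKeys({
    query: { term: { invalidated: false } },
    sort: [{ creation: { order: 'desc' } }],
    size: 25
  })
  console.log(page.total, page.api_keys.map(k => k.name))
}

run().catch(console.error)
```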
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user | Elasticsearch API documentation} + */ + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise + async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.query_user'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_security/_query/user' + const meta: TransportRequestMetadata = { + name: 'security.query_user', + acceptedParams: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after', + 'with_profile_uid' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Authenticate SAML. Submit a SAML response message to Elasticsearch for consumption. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML message that is submitted can be: * A response to a SAML authentication request that was previously created using the SAML prepare authentication API. * An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. In either case, the SAML message needs to be a base64 encoded XML document with a root element of `<Response>`. After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.
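A short sketch of the `queryUser` implementation above; the wildcard pattern is illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

async function run (): Promise<void> {
  const page = await client.security.queryUser({
    query: { wildcard: { username: 'j*' } },
    from: 0,
    size: 10
  })
  console.log(page.total, page.users.map(u => u.username))
}

run().catch(console.error)
```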
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate | Elasticsearch API documentation} + */ + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise + async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.saml_authenticate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/saml/authenticate' + const meta: TransportRequestMetadata = { + name: 'security.saml_authenticate', + acceptedParams: [ + 'content', + 'ids', + 'realm' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Logout of SAML completely. Verifies the logout response sent from the SAML IdP. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them. 
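+ * @example
+ * An illustrative sketch only; the realm, request ID, and logout response are hypothetical.
+ *
+ * await client.security.samlCompleteLogout({
+ *   realm: 'saml1',
+ *   ids: ['_1c368075e0b3...'], // ID of the logout request this responds to (truncated placeholder)
+ *   content: 'PHNhbWxwOk...'   // base64-encoded LogoutResponse, for the HTTP-Post binding
+ * })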
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout | Elasticsearch API documentation} + */ + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise + async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.saml_complete_logout'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/saml/complete_logout' + const meta: TransportRequestMetadata = { + name: 'security.saml_complete_logout', + acceptedParams: [ + 'realm', + 'ids', + 'query_string', + 'content' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP. 
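+ * @example
+ * An illustrative sketch only; the query string is a hypothetical, truncated value as delivered
+ * by the IdP.
+ *
+ * const resp = await client.security.samlInvalidate({
+ *   query_string: 'SAMLRequest=nZFda4Mw...&SigAlg=http%3A%2F%2F...&Signature=MsAYz2NFdovMG2mXf...',
+ *   realm: 'saml1'
+ * })
+ * // resp.redirect carries the URL with the SAML LogoutResponse, to send the user back to the IdP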
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate | Elasticsearch API documentation} + */ + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise + async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.saml_invalidate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/saml/invalidate' + const meta: TransportRequestMetadata = { + name: 'security.saml_invalidate', + acceptedParams: [ + 'acs', + 'query_string', + 'realm' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Logout of SAML. Submits a request to invalidate an access token and refresh token. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout | Elasticsearch API documentation} + */ + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise + async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.saml_logout'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/saml/logout' + const meta: TransportRequestMetadata = { + name: 'security.saml_logout', + acceptedParams: [ + 'token', + 'refresh_token' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Prepare SAML authentication. Create a SAML authentication request (`<AuthnRequest>`) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication | Elasticsearch API documentation} + */ + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise + async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.saml_prepare_authentication'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_security/saml/prepare' + const meta: TransportRequestMetadata = { + name: 'security.saml_prepare_authentication', + acceptedParams: [ + 'acs', + 'realm', + 'relay_state' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider. The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata | Elasticsearch API documentation} + */ + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise + async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['security.saml_service_provider_metadata'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_security/saml/metadata/${encodeURIComponent(params.realm_name.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.saml_service_provider_metadata', + pathParts: { + realm_name: params.realm_name + }, + acceptedParams: [ + 'realm_name' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Suggest a user profile. Get suggestions for user profiles that match specified search criteria. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. 
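+ * @example
+ * An illustrative sketch only; the search string is hypothetical.
+ *
+ * const profiles = await client.security.suggestUserProfiles({
+ *   name: 'jack', // matched against name-related fields of the profiles
+ *   size: 10
+ * })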
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles | Elasticsearch API documentation} + */ + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise + async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.suggest_user_profiles'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_security/profile/_suggest' + const meta: TransportRequestMetadata = { + name: 'security.suggest_user_profiles', + acceptedParams: [ + 'name', + 'size', + 'data', + 'hint' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update an API key. Update attributes of an existing API key. This API supports updates to an API key's access scope, expiration, and metadata. To use this API, you must have at least the `manage_own_api_key` cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. Use this API to update API keys created by the create API key or grant API key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. 
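+ * @example
+ * An illustrative sketch only; the API key ID and role descriptor are hypothetical.
+ *
+ * await client.security.updateApiKey({
+ *   id: 'VuaCfGcBCdbkQm-e5aOx',
+ *   role_descriptors: {
+ *     'read-logs': { indices: [{ names: ['logs-*'], privileges: ['read'] }] }
+ *   },
+ *   metadata: { environment: 'production' }
+ * })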
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key | Elasticsearch API documentation} + */ + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise + async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.update_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_security/api_key/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.update_api_key', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'role_descriptors', + 'metadata', + 'expiration' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required. It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. To learn more about how to use this API, refer to the [Update cross cluster API key API examples page](https://www.elastic.co/docs/reference/elasticsearch/rest-apis/update-cc-api-key-examples). 
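+ * @example
+ * An illustrative sketch only; the key ID and access definition are hypothetical.
+ *
+ * await client.security.updateCrossClusterApiKey({
+ *   id: 'VuaCfGcBCdbkQm-e5aOx',
+ *   access: { search: [{ names: ['logs*'] }] }, // replaces the key's current access scope
+ *   metadata: { purpose: 'cross-cluster search' }
+ * })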
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key | Elasticsearch API documentation} + */ + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise + async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.update_cross_cluster_api_key'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_security/cross_cluster/api_key/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'security.update_cross_cluster_api_key', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'access', + 'expiration', + 'metadata', + 'certificate_identity' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. 
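+ * @example
+ * An illustrative sketch only, using one of the settings named above.
+ *
+ * await client.security.updateSettings({
+ *   security: { 'index.auto_expand_replicas': '0-all' },
+ *   'security-tokens': { 'index.auto_expand_replicas': '0-all' }
+ * })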
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings | Elasticsearch API documentation} + */ + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.update_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = '/_security/settings' + const meta: TransportRequestMetadata = { + name: 'security.update_settings', + acceptedParams: [ + 'security', + 'security-profile', + 'security-tokens', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update user profile data. Update specific data for the user profile that is associated with a unique ID. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. To use this API, you must have one of the following privileges: * The `manage_user_profile` cluster privilege. * The `update_profile_data` global privilege for the namespaces that are referenced in the request. This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. For both labels and data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. 
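+ * @example
+ * An illustrative sketch only; the profile UID and namespaced payloads are hypothetical.
+ *
+ * await client.security.updateUserProfileData({
+ *   uid: 'u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0',
+ *   labels: { app1: { team: 'observability' } }, // content is namespaced by the top-level field
+ *   data: { app1: { theme: 'dark' } }
+ * })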
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data | Elasticsearch API documentation} + */ + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise + async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['security.update_user_profile_data'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_security/profile/${encodeURIComponent(params.uid.toString())}/_data` + const meta: TransportRequestMetadata = { + name: 'security.update_user_profile_data', + pathParts: { + uid: params.uid + }, + acceptedParams: [ + 'uid', + 'labels', + 'data', + 'if_seq_no', + 'if_primary_term', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts new file mode 100644 index 000000000..2f1f9d5c0 --- /dev/null +++ b/src/api/api/shutdown.ts @@ -0,0 +1,248 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
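+
+// An illustrative sketch only (not generated): a typical node-shutdown round trip using the
+// Shutdown API defined below, assuming an instantiated `client` and a hypothetical node ID.
+//
+//   await client.shutdown.putNode({ node_id: 'node-1', type: 'restart', reason: 'OS patching' })
+//   const status = await client.shutdown.getNode({ node_id: 'node-1' }) // poll until safe to stop
+//   await client.shutdown.deleteNode({ node_id: 'node-1' })             // clear the request afterwards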
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Shutdown { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'shutdown.delete_node': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'shutdown.get_node': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'shutdown.put_node': { + path: [ + 'node_id' + ], + body: [ + 'type', + 'reason', + 'allocation_delay', + 'target_node_name' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } + } + + /** + * Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node | Elasticsearch API documentation} + */ + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise + async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['shutdown.delete_node'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/shutdown` + const meta: TransportRequestMetadata = { + name: 'shutdown.delete_node', + pathParts: { + node_id: params.node_id + }, + acceptedParams: [ + 'node_id', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the shutdown status. Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process. 
NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node | Elasticsearch API documentation} + */ + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise + async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['shutdown.get_node'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.node_id != null) { + method = 'GET' + path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/shutdown` + } else { + method = 'GET' + path = '/_nodes/shutdown' + } + const meta: TransportRequestMetadata = { + name: 'shutdown.get_node', + pathParts: { + node_id: params.node_id + }, + acceptedParams: [ + 'node_id', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. 
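+ * @example
+ * An illustrative sketch only; the node ID and reason are hypothetical.
+ *
+ * await client.shutdown.putNode({
+ *   node_id: 'node-1',
+ *   type: 'restart',         // one of 'restart', 'remove', or 'replace'
+ *   reason: 'OS patching',
+ *   allocation_delay: '20m'  // applies to 'restart' shutdowns
+ * })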
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node | Elasticsearch API documentation} + */ + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise + async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['shutdown.put_node'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_nodes/${encodeURIComponent(params.node_id.toString())}/shutdown` + const meta: TransportRequestMetadata = { + name: 'shutdown.put_node', + pathParts: { + node_id: params.node_id + }, + acceptedParams: [ + 'node_id', + 'type', + 'reason', + 'allocation_delay', + 'target_node_name', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts new file mode 100644 index 000000000..a1dff921c --- /dev/null +++ b/src/api/api/simulate.ts @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
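+
+// An illustrative sketch only (not generated): exercising the `ingest` method defined below
+// with a hypothetical document and pipeline substitution.
+//
+//   const result = await client.simulate.ingest({
+//     index: 'my-index',
+//     docs: [{ _source: { message: 'hello world' } }],
+//     pipeline_substitutions: {
+//       'my-pipeline': { processors: [{ set: { field: 'simulated', value: true } }] }
+//     }
+//   })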
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Simulate { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'simulate.ingest': { + path: [ + 'index' + ], + body: [ + 'docs', + 'component_template_substitutions', + 'index_template_substitutions', + 'mapping_addition', + 'pipeline_substitutions' + ], + query: [ + 'pipeline', + 'merge_type' + ] + } + } + } + + /** + * Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest | Elasticsearch API documentation} + */ + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise> + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise + async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['simulate.ingest'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null) { + method = body != null ? 'POST' : 'GET' + path = `/_ingest/${encodeURIComponent(params.index.toString())}/_simulate` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_ingest/_simulate' + } + const meta: TransportRequestMetadata = { + name: 'simulate.ingest', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'docs', + 'component_template_substitutions', + 'index_template_substitutions', + 'mapping_addition', + 'pipeline_substitutions', + 'pipeline', + 'merge_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts new file mode 100644 index 000000000..c710ce10c --- /dev/null +++ b/src/api/api/slm.ts @@ -0,0 +1,587 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
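+
+// An illustrative sketch only (not generated): creating a snapshot lifecycle policy with the
+// `putLifecycle` method defined below. All values are hypothetical.
+//
+//   await client.slm.putLifecycle({
+//     policy_id: 'daily-snapshots',
+//     schedule: '0 30 1 * * ?',      // cron expression: every day at 1:30am
+//     name: '<daily-snap-{now/d}>',  // snapshot name, supports date math
+//     repository: 'my_repository',   // a registered snapshot repository
+//     config: { indices: ['*'] },
+//     retention: { expire_after: '30d', min_count: 5, max_count: 50 }
+//   })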
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Slm { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'slm.delete_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.execute_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.execute_retention': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_stats': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_status': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.put_lifecycle': { + path: [ + 'policy_id' + ], + body: [ + 'config', + 'name', + 'repository', + 'retention', + 'schedule' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.start': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.stop': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } + } + + /** + * Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle | Elasticsearch API documentation} + */ + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise + async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['slm.delete_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'slm.delete_lifecycle', + pathParts: { + policy_id: params.policy_id + }, + acceptedParams: [ + 'policy_id', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run a policy. 
Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle | Elasticsearch API documentation} + */ + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise + async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['slm.execute_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}/_execute` + const meta: TransportRequestMetadata = { + name: 'slm.execute_lifecycle', + pathParts: { + policy_id: params.policy_id + }, + acceptedParams: [ + 'policy_id', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention | Elasticsearch API documentation} + */ + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise + async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['slm.execute_retention'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_slm/_execute_retention' + const meta: TransportRequestMetadata = { + name: 'slm.execute_retention', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle | Elasticsearch API documentation} + */ + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise + async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['slm.get_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.policy_id != null) { + method = 'GET' + path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}` + } else { + method = 'GET' + path = '/_slm/policy' + } + const meta: TransportRequestMetadata = { + name: 'slm.get_lifecycle', + pathParts: { + policy_id: params.policy_id + }, + acceptedParams: [ + 'policy_id', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats | Elasticsearch API documentation} + */ + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise + async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['slm.get_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_slm/stats' + const meta: TransportRequestMetadata = { + name: 'slm.get_stats', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the snapshot lifecycle management status. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status | Elasticsearch API documentation} + */ + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise + async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['slm.get_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_slm/status' + const meta: TransportRequestMetadata = { + name: 'slm.get_status', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle | Elasticsearch API documentation} + */ + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise + async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['slm.put_lifecycle'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_slm/policy/${encodeURIComponent(params.policy_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'slm.put_lifecycle', + pathParts: { + policy_id: params.policy_id + }, + acceptedParams: [ + 'policy_id', + 'config', + 'name', + 'repository', + 'retention', + 'schedule', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start | Elasticsearch API documentation} + */ + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise + async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['slm.start'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_slm/start' + const meta: TransportRequestMetadata = { + name: 'slm.start', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. 
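As an aside from the generated diff: a minimal usage sketch for the SLM controls above, assuming a configured `Client` from `@elastic/elasticsearch`, a hypothetical local node, and an illustrative polling interval. The `operation_mode` field is the value returned by the SLM status API.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // hypothetical node

// Stop SLM, wait until the plugin reports STOPPED, then start it again.
await client.slm.stop()
let status = await client.slm.getStatus()
while (status.operation_mode !== 'STOPPED') {
  await new Promise(resolve => setTimeout(resolve, 1000))
  status = await client.slm.getStatus()
}
await client.slm.start()
```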
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop | Elasticsearch API documentation} + */ + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise + async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['slm.stop'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_slm/stop' + const meta: TransportRequestMetadata = { + name: 'slm.stop', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts new file mode 100644 index 000000000..556b7a8f6 --- /dev/null +++ b/src/api/api/snapshot.ts @@ -0,0 +1,997 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Snapshot { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'snapshot.cleanup_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'snapshot.clone': { + path: [ + 'repository', + 'snapshot', + 'target_snapshot' + ], + body: [ + 'indices' + ], + query: [ + 'master_timeout' + ] + }, + 'snapshot.create': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'expand_wildcards', + 'feature_states', + 'ignore_unavailable', + 'include_global_state', + 'indices', + 'metadata', + 'partial' + ], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.create_repository': { + path: [ + 'name' + ], + body: [ + 'repository' + ], + query: [ + 'master_timeout', + 'timeout', + 'verify' + ] + }, + 'snapshot.delete': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.delete_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'snapshot.get': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'after', + 'from_sort_value', + 'ignore_unavailable', + 'index_details', + 'index_names', + 'include_repository', + 'master_timeout', + 'order', + 'offset', + 'size', + 'slm_policy_filter', + 'sort', + 'state', + 'verbose' + ] + }, + 'snapshot.get_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'master_timeout' + ] + }, + 'snapshot.repository_analyze': { + path: [ + 'name' + ], + body: [], + query: [ + 'blob_count', + 'concurrency', + 'detailed', + 'early_read_node_count', + 'max_blob_size', + 'max_total_data_size', + 'rare_action_probability', + 'rarely_abort_writes', + 'read_node_count', + 'register_operation_count', + 'seed', + 'timeout' + ] + }, + 'snapshot.repository_verify_integrity': { + path: [ + 'name' + ], + body: [], + query: [ + 'blob_thread_pool_concurrency', + 'index_snapshot_verification_concurrency', + 'index_verification_concurrency', + 'max_bytes_per_sec', + 'max_failed_shard_snapshots', + 'meta_thread_pool_concurrency', + 'snapshot_verification_concurrency', + 'verify_blob_contents' + ] + }, + 'snapshot.restore': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'feature_states', + 'ignore_index_settings', + 'ignore_unavailable', + 'include_aliases', + 'include_global_state', + 'index_settings', + 'indices', + 'partial', + 'rename_pattern', + 'rename_replacement' + ], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.status': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'snapshot.verify_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } + } + + /** + * Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. 
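For orientation, a minimal sketch of calling this endpoint (hypothetical repository name; assumes a configured `Client`):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // hypothetical node

// POST /_snapshot/my_repository/_cleanup; the response reports what was freed.
const cleanup = await client.snapshot.cleanupRepository({ name: 'my_repository' })
console.log(cleanup.results.deleted_bytes, cleanup.results.deleted_blobs)
```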
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository | Elasticsearch API documentation} + */ + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise + async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['snapshot.cleanup_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_cleanup` + const meta: TransportRequestMetadata = { + name: 'snapshot.cleanup_repository', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'repository', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Clone a snapshot. Clone part or all of a snapshot into another snapshot in the same repository. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone | Elasticsearch API documentation} + */ + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise + async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['snapshot.clone'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_clone/${encodeURIComponent(params.target_snapshot.toString())}` + const meta: TransportRequestMetadata = { + name: 'snapshot.clone', + pathParts: { + repository: params.repository, + snapshot: params.snapshot, + target_snapshot: params.target_snapshot + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'target_snapshot', + 'indices', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a snapshot. Take a snapshot of a cluster or of data streams and indices. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create | Elasticsearch API documentation} + */ + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise + async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['snapshot.create'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}` + const meta: TransportRequestMetadata = { + name: 'snapshot.create', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'expand_wildcards', + 'feature_states', + 'ignore_unavailable', + 'include_global_state', + 'indices', + 'metadata', + 'partial', + 'master_timeout', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. Several options for this API can be specified using a query parameter or a request body parameter. If both parameters are specified, only the query parameter is used.
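A minimal sketch of registering a repository with this method, assuming a configured `Client`; the shared-filesystem type and paths are hypothetical. The repository definition travels in the `repository` body key, matching the accepted body params declared above.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // hypothetical node

// PUT /_snapshot/my_repository; verify=true checks the repository on all nodes.
await client.snapshot.createRepository({
  name: 'my_repository',
  repository: {
    type: 'fs',
    settings: { location: '/mount/backups/my_repository' } // hypothetical path
  },
  verify: true
})
```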
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository | Elasticsearch API documentation} + */ + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise + async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['snapshot.create_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'snapshot.create_repository', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'repository', + 'master_timeout', + 'timeout', + 'verify' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete snapshots. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete | Elasticsearch API documentation} + */ + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['snapshot.delete'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}` + const meta: TransportRequestMetadata = { + name: 'snapshot.delete', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'master_timeout', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository | Elasticsearch API documentation} + */ + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise + async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['snapshot.delete_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}` + const meta: TransportRequestMetadata = { + name: 'snapshot.delete_repository', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'repository', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get snapshot information. NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration. 
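To make the `after`/`next` iteration contract concrete, here is a sketch that pages through all snapshots of a hypothetical repository, assuming a configured `Client` and the `next` cursor field described above:

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // hypothetical node

// Page through snapshots 50 at a time; `next` feeds the `after` cursor.
let after: string | undefined
do {
  const page = await client.snapshot.get({
    repository: 'my_repository', // hypothetical repository
    snapshot: '*',
    size: 50,
    after
  })
  for (const snap of page.snapshots ?? []) console.log(snap.snapshot)
  after = page.next
} while (after != null)
```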
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get | Elasticsearch API documentation} + */ + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['snapshot.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}` + const meta: TransportRequestMetadata = { + name: 'snapshot.get', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'after', + 'from_sort_value', + 'ignore_unavailable', + 'index_details', + 'index_names', + 'include_repository', + 'master_timeout', + 'order', + 'offset', + 'size', + 'slm_policy_filter', + 'sort', + 'state', + 'verbose' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get snapshot repository information. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository | Elasticsearch API documentation} + */ + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise + async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['snapshot.get_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.name != null) { + method = 'GET' + path = `/_snapshot/${encodeURIComponent(params.name.toString())}` + } else { + method = 'GET' + path = '/_snapshot' + } + const meta: TransportRequestMetadata = { + name: 'snapshot.get_repository', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'repository', + 'local', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Analyze a snapshot repository. Performs operations on a snapshot repository in order to check for incorrect behaviour. There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. Some repositories may behave correctly when lightly loaded but incorrectly under production-like workloads. If the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Some repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. Repository analysis triggers conditions that occur only rarely when taking snapshots in a production system. Snapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis failures. However your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis. You can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using. 
For instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis. This allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch. Please do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: * Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. * Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. * Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. NOTE: This API is intended for exploratory use by humans. 
You should expect the request parameters and the response format to vary in future versions. The response exposes implementation details of the analysis which may change from version to version. NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. NOTE: This API may not work correctly in a mixed-version cluster. *Implementation details* NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter, and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time.
The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze | Elasticsearch API documentation} + */ + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise + async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['snapshot.repository_analyze'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_analyze` + const meta: TransportRequestMetadata = { + name: 'snapshot.repository_analyze', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'repository', + 'blob_count', + 'concurrency', + 'detailed', + 'early_read_node_count', + 'max_blob_size', + 'max_total_data_size', + 'rare_action_probability', + 'rarely_abort_writes', + 'read_node_count', + 'register_operation_count', + 'seed', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Verify the repository integrity. Verify the integrity of the contents of a snapshot repository. This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. Until you do so: * It may not be possible to restore some snapshots from this repository. * Searchable snapshots may report errors when searched or may have unassigned shards. * Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored.
* Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. * Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents. If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again. If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. Avoid all operations which write to the repository while the verify repository integrity API is running. If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: This API may not work correctly in a mixed-version cluster. The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster. For instance, by default it will only use at most half of the `snapshot_meta` threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool. If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster. For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API. The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity | Elasticsearch API documentation} + */ + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise + async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['snapshot.repository_verify_integrity'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ?
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_verify_integrity` + const meta: TransportRequestMetadata = { + name: 'snapshot.repository_verify_integrity', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'repository', + 'blob_thread_pool_concurrency', + 'index_snapshot_verification_concurrency', + 'index_verification_concurrency', + 'max_bytes_per_sec', + 'max_failed_shard_snapshots', + 'meta_thread_pool_concurrency', + 'snapshot_verification_concurrency', + 'verify_blob_contents' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ``` If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore | Elasticsearch API documentation} + */ + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise + async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['snapshot.restore'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_restore` + const meta: TransportRequestMetadata = { + name: 'snapshot.restore', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'feature_states', + 'ignore_index_settings', + 'ignore_unavailable', + 'include_aliases', + 'include_global_state', + 'index_settings', + 'indices', + 'partial', + 'rename_pattern', + 'rename_replacement', + 'master_timeout', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. If you omit the `<repository>` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running. Note that the stats will not be available for any shard snapshots in an ongoing snapshot completed by a node that (even momentarily) left the cluster. Loading the stats from the repository is an expensive operation (see the WARNING below). Therefore the stats values for such shards will be -1 even though the "stage" value will be "DONE", in order to minimize latency. A "description" field will be present for a shard snapshot completed by a departed node explaining why the shard snapshot's stats results are invalid. Consequently, the total stats for the index will be less than expected due to the missing values from these shards. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
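Given the cost warning above, a sketch of the preferred usage, which omits both path parameters so only currently running snapshots are inspected (assumes a configured `Client`):

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // hypothetical node

// GET /_snapshot/_status: shard-level detail for running snapshots only.
const status = await client.snapshot.status()
for (const snap of status.snapshots ?? []) {
  console.log(snap.snapshot, snap.state)
}
```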
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status | Elasticsearch API documentation} + */ + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise + async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['snapshot.status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.repository != null && params.snapshot != null) { + method = 'GET' + path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/${encodeURIComponent(params.snapshot.toString())}/_status` + } else if (params.repository != null) { + method = 'GET' + path = `/_snapshot/${encodeURIComponent(params.repository.toString())}/_status` + } else { + method = 'GET' + path = '/_snapshot/_status' + } + const meta: TransportRequestMetadata = { + name: 'snapshot.status', + pathParts: { + repository: params.repository, + snapshot: params.snapshot + }, + acceptedParams: [ + 'repository', + 'snapshot', + 'ignore_unavailable', + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository | Elasticsearch API documentation} + */ + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise + async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['snapshot.verify_repository'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_snapshot/${encodeURIComponent(params.name.toString())}/_verify` + const meta: TransportRequestMetadata = { + name: 'snapshot.verify_repository', + pathParts: { + name: params.name + }, + acceptedParams: [ + 'repository', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts new file mode 100644 index 000000000..47c411cfb --- /dev/null +++ b/src/api/api/sql.ts @@ -0,0 +1,451 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Sql { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'sql.clear_cursor': { + path: [], + body: [ + 'cursor' + ], + query: [] + }, + 'sql.delete_async': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.get_async': { + path: [ + 'id' + ], + body: [], + query: [ + 'delimiter', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'sql.get_async_status': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.query': { + path: [], + body: [ + 'allow_partial_search_results', + 'catalog', + 'columnar', + 'cursor', + 'fetch_size', + 'field_multi_value_leniency', + 'filter', + 'index_using_frozen', + 'keep_alive', + 'keep_on_completion', + 'page_timeout', + 'params', + 'query', + 'request_timeout', + 'runtime_mappings', + 'time_zone', + 'wait_for_completion_timeout' + ], + query: [ + 'format', + 'project_routing' + ] + }, + 'sql.translate': { + path: [], + body: [ + 'fetch_size', + 'filter', + 'query', + 'time_zone' + ], + query: [] + } + } + } + + /** + * Clear an SQL search cursor. 
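A sketch of the cursor lifecycle this method closes out, assuming a configured `Client` and a hypothetical index: open a cursor with a small `fetch_size`, then release it server-side when done.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // hypothetical node

// A small fetch_size forces pagination, so the response includes a cursor.
const page = await client.sql.query({
  query: 'SELECT * FROM my_index', // hypothetical index
  fetch_size: 5
})
if (page.cursor != null) {
  await client.sql.clearCursor({ cursor: page.cursor })
}
```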
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor | Elasticsearch API documentation} + */ + async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise + async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['sql.clear_cursor'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_sql/close' + const meta: TransportRequestMetadata = { + name: 'sql.clear_cursor', + acceptedParams: [ + 'cursor' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async | Elasticsearch API documentation} + */ + async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise + async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['sql.delete_async'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'DELETE'
+    const path = `/_sql/async/delete/${encodeURIComponent(params.id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'sql.delete_async',
+      pathParts: {
+        id: params.id
+      },
+      acceptedParams: [
+        'id'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async | Elasticsearch API documentation}
+    */
+  async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SqlGetAsyncResponse>
+  async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SqlGetAsyncResponse, unknown>>
+  async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise<T.SqlGetAsyncResponse>
+  async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['sql.get_async']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_sql/async/${encodeURIComponent(params.id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'sql.get_async',
+      pathParts: {
+        id: params.id
+      },
+      acceptedParams: [
+        'id',
+        'delimiter',
+        'format',
+        'keep_alive',
+        'wait_for_completion_timeout'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search.
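+    * @example
+    * // A minimal usage sketch, assuming `client` is an already-configured
+    * // Client instance and `id` came from a `sql.query` call that used
+    * // `keep_on_completion`:
+    * const status = await client.sql.getAsyncStatus({ id })
+    * console.log(status.is_running, status.completion_status)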
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status | Elasticsearch API documentation} + */ + async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise + async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['sql.get_async_status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_sql/async/status/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'sql.get_async_status', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get SQL search results. Run an SQL request. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query | Elasticsearch API documentation} + */ + async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> + async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise + async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['sql.query'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 
'POST' : 'GET' + const path = '/_sql' + const meta: TransportRequestMetadata = { + name: 'sql.query', + acceptedParams: [ + 'allow_partial_search_results', + 'catalog', + 'columnar', + 'cursor', + 'fetch_size', + 'field_multi_value_leniency', + 'filter', + 'index_using_frozen', + 'keep_alive', + 'keep_on_completion', + 'page_timeout', + 'params', + 'query', + 'request_timeout', + 'runtime_mappings', + 'time_zone', + 'wait_for_completion_timeout', + 'format', + 'project_routing' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate | Elasticsearch API documentation} + */ + async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise + async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['sql.translate'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_sql/translate' + const meta: TransportRequestMetadata = { + name: 'sql.translate', + acceptedParams: [ + 'fetch_size', + 'filter', + 'query', + 'time_zone' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts new file mode 100644 index 000000000..ada1ae8db --- /dev/null +++ b/src/api/api/ssl.ts @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
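+
+// A minimal usage sketch for the API in this file, assuming `client` is an
+// already-configured Client instance (the variable name is illustrative):
+//
+//   const certs = await client.ssl.certificates()
+//   for (const cert of certs) console.log(cert.path, cert.expiry)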
+
+import {
+  Transport,
+  TransportRequestMetadata,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+import { kAcceptedParams } from '../../client'
+
+interface That {
+  transport: Transport
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>
+}
+
+export default class Ssl {
+  transport: Transport
+  [kAcceptedParams]: Record<string, { path: string[], body: string[], query: string[] }>
+  constructor (transport: Transport) {
+    this.transport = transport
+    this[kAcceptedParams] = {
+      'ssl.certificates': {
+        path: [],
+        body: [],
+        query: []
+      }
+    }
+  }
+
+  /**
+    * Get SSL certificates. Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including: - Settings for transport and HTTP interfaces - TLS settings that are used within authentication realms - TLS settings for remote monitoring exporters The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. It also includes certificates that are used for configuring server identity, such as the `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings. The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates | Elasticsearch API documentation}
+    */
+  async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.SslCertificatesResponse>
+  async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.SslCertificatesResponse, unknown>>
+  async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise<T.SslCertificatesResponse>
+  async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['ssl.certificates']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ??
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_ssl/certificates' + const meta: TransportRequestMetadata = { + name: 'ssl.certificates', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/streams.ts b/src/api/api/streams.ts new file mode 100644 index 000000000..97ea936a6 --- /dev/null +++ b/src/api/api/streams.ts @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +export default class Streams { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'streams.logs_disable': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'streams.logs_enable': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'streams.status': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } + } + + /** + * Disable logs stream. Turn off the logs stream feature for this cluster. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch#TODO | Elasticsearch API documentation} + */ + async logsDisable (this: That, params?: T.StreamsLogsDisableRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async logsDisable (this: That, params?: T.StreamsLogsDisableRequest, options?: TransportRequestOptionsWithMeta): Promise> + async logsDisable (this: That, params?: T.StreamsLogsDisableRequest, options?: TransportRequestOptions): Promise + async logsDisable (this: That, params?: T.StreamsLogsDisableRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['streams.logs_disable'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_streams/logs/_disable' + const meta: TransportRequestMetadata = { + name: 'streams.logs_disable', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Enable logs stream. Turn on the logs stream feature for this cluster. NOTE: To protect existing data, this feature can be turned on only if the cluster does not have existing indices or data streams that match the pattern `logs|logs.*`. If those indices or data streams exist, a `409 - Conflict` response and error is returned. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch#TODO | Elasticsearch API documentation} + */ + async logsEnable (this: That, params?: T.StreamsLogsEnableRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async logsEnable (this: That, params?: T.StreamsLogsEnableRequest, options?: TransportRequestOptionsWithMeta): Promise> + async logsEnable (this: That, params?: T.StreamsLogsEnableRequest, options?: TransportRequestOptions): Promise + async logsEnable (this: That, params?: T.StreamsLogsEnableRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['streams.logs_enable'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_streams/logs/_enable' + const meta: TransportRequestMetadata = { + name: 'streams.logs_enable', + acceptedParams: [ + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get the status of streams. Get the current status for all types of streams. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch#TODO | Elasticsearch API documentation} + */ + async status (this: That, params?: T.StreamsStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async status (this: That, params?: T.StreamsStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async status (this: That, params?: T.StreamsStatusRequest, options?: TransportRequestOptions): Promise + async status (this: That, params?: T.StreamsStatusRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['streams.status'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_streams/status' + const meta: TransportRequestMetadata = { + name: 'streams.status', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts new file mode 100644 index 000000000..e672db93e --- /dev/null +++ b/src/api/api/synonyms.ts @@ -0,0 +1,479 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Synonyms { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'synonyms.delete_synonym': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'synonyms.delete_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'synonyms.get_synonym': { + path: [ + 'id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.get_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'synonyms.get_synonyms_sets': { + path: [], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.put_synonym': { + path: [ + 'id' + ], + body: [ + 'synonyms_set' + ], + query: [ + 'refresh' + ] + }, + 'synonyms.put_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [ + 'synonyms' + ], + query: [ + 'refresh' + ] + } + } + } + + /** + * Delete a synonym set. You can only delete a synonyms set that is not in use by any index analyzer. Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers. Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can't be deleted. A delete request in this case will return a 400 response code. To remove a synonyms set, you must first remove all indices that contain analyzers using it. 
You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym | Elasticsearch API documentation} + */ + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise + async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['synonyms.delete_synonym'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_synonyms/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'synonyms.delete_synonym', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete a synonym rule. Delete a synonym rule from a synonym set. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule | Elasticsearch API documentation} + */ + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise + async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['synonyms.delete_synonym_rule'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_synonyms/${encodeURIComponent(params.set_id.toString())}/${encodeURIComponent(params.rule_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'synonyms.delete_synonym_rule', + pathParts: { + set_id: params.set_id, + rule_id: params.rule_id + }, + acceptedParams: [ + 'set_id', + 'rule_id', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a synonym set. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym | Elasticsearch API documentation} + */ + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise + async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['synonyms.get_synonym'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_synonyms/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'synonyms.get_synonym', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'from', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a synonym rule. Get a synonym rule from a synonym set. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule | Elasticsearch API documentation} + */ + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise + async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['synonyms.get_synonym_rule'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_synonyms/${encodeURIComponent(params.set_id.toString())}/${encodeURIComponent(params.rule_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'synonyms.get_synonym_rule', + pathParts: { + set_id: params.set_id, + rule_id: params.rule_id + }, + acceptedParams: [ + 'set_id', + 'rule_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get all synonym sets. Get a summary of all defined synonym sets. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym | Elasticsearch API documentation} + */ + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise + async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['synonyms.get_synonyms_sets'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_synonyms' + const meta: TransportRequestMetadata = { + name: 'synonyms.get_synonyms_sets', + acceptedParams: [ + 'from', + 'size' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. For practical examples of how to create or update a synonyms set, refer to the External documentation. 
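+    * @example
+    * // A minimal usage sketch, assuming `client` is an already-configured
+    * // Client instance; the set name and rule are illustrative:
+    * await client.synonyms.putSynonym({
+    *   id: 'my-synonyms-set',
+    *   synonyms_set: [{ id: 'rule-1', synonyms: 'hello, hi, howdy' }]
+    * })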
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym | Elasticsearch API documentation} + */ + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise + async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['synonyms.put_synonym'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_synonyms/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'synonyms.put_synonym', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'synonyms_set', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a synonym rule. Create or update a synonym rule in a synonym set. If any of the synonym rules included is invalid, the API returns an error. When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule | Elasticsearch API documentation} + */ + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise + async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['synonyms.put_synonym_rule'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_synonyms/${encodeURIComponent(params.set_id.toString())}/${encodeURIComponent(params.rule_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'synonyms.put_synonym_rule', + pathParts: { + set_id: params.set_id, + rule_id: params.rule_id + }, + acceptedParams: [ + 'set_id', + 'rule_id', + 'synonyms', + 'refresh' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts new file mode 100644 index 000000000..48de0e23f --- /dev/null +++ b/src/api/api/tasks.ts @@ -0,0 +1,236 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +export default class Tasks { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'tasks.cancel': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'actions', + 'nodes', + 'parent_task_id', + 'wait_for_completion' + ] + }, + 'tasks.get': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + }, + 'tasks.list': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'group_by', + 'nodes', + 'parent_task_id', + 'timeout', + 'wait_for_completion' + ] + } + } + } + + /** + * Cancel a task. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. 
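+    * @example
+    * // A minimal usage sketch, assuming `client` is an already-configured
+    * // Client instance; the task ID is illustrative (`<node_id>:<task_number>`):
+    * await client.tasks.cancel({ task_id: 'oTUltX4IQMOUUVeiohTt8A:12345' })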
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks | Elasticsearch API documentation} + */ + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise + async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['tasks.cancel'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.task_id != null) { + method = 'POST' + path = `/_tasks/${encodeURIComponent(params.task_id.toString())}/_cancel` + } else { + method = 'POST' + path = '/_tasks/_cancel' + } + const meta: TransportRequestMetadata = { + name: 'tasks.cancel', + pathParts: { + task_id: params.task_id + }, + acceptedParams: [ + 'task_id', + 'actions', + 'nodes', + 'parent_task_id', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get task information. Get information about a task currently running in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. If the task identifier is not found, a 404 response code indicates that there are no resources that match the request. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks | Elasticsearch API documentation} + */ + async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['tasks.get'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = `/_tasks/${encodeURIComponent(params.task_id.toString())}`
+    const meta: TransportRequestMetadata = {
+      name: 'tasks.get',
+      pathParts: {
+        task_id: params.task_id
+      },
+      acceptedParams: [
+        'task_id',
+        'timeout',
+        'wait_for_completion'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+
+  /**
+    * Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. **Identifying running tasks** The `X-Opaque-Id` header, when provided on the HTTP request header, is returned as a header in the response as well as in the `headers` field of the task information. This enables you to track certain calls or associate certain tasks with the client that started them. For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" ``` The API returns the following result: ``` HTTP/1.1 200 OK X-Opaque-Id: 123456 content-type: application/json; charset=UTF-8 content-length: 831 { "tasks" : { "u5lcZHqcQhu-rUoFaqDphA:45" : { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 45, "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis" : 1513823752749, "running_time_in_nanos" : 293139, "cancellable" : false, "headers" : { "X-Opaque-Id" : "123456" }, "children" : [ { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 46, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" : 1513823752750, "running_time_in_nanos" : 92133, "cancellable" : false, "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", "headers" : { "X-Opaque-Id" : "123456" } } ] } } } ``` In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request.
+    * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks | Elasticsearch API documentation}
+    */
+  async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TasksListResponse>
+  async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TasksListResponse, unknown>>
+  async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise<T.TasksListResponse>
+  async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise<any> {
+    const {
+      path: acceptedPath
+    } = this[kAcceptedParams]['tasks.list']
+
+    const userQuery = params?.querystring
+    const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+    let body: Record<string, any> | string | undefined
+    const userBody = params?.body
+    if (userBody != null) {
+      if (typeof userBody === 'string') {
+        body = userBody
+      } else {
+        body = { ...userBody }
+      }
+    }
+
+    params = params ?? {}
+    for (const key in params) {
+      if (acceptedPath.includes(key)) {
+        continue
+      } else if (key !== 'body' && key !== 'querystring') {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      }
+    }
+
+    const method = 'GET'
+    const path = '/_tasks'
+    const meta: TransportRequestMetadata = {
+      name: 'tasks.list',
+      acceptedParams: [
+        'actions',
+        'detailed',
+        'group_by',
+        'nodes',
+        'parent_task_id',
+        'timeout',
+        'wait_for_completion'
+      ]
+    }
+    return await this.transport.request({ path, method, querystring, body, meta }, options)
+  }
+}
diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts
new file mode 100644
index 000000000..dec56df1a
--- /dev/null
+++ b/src/api/api/terms_enum.ts
@@ -0,0 +1,114 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestMetadata,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  terms_enum: {
+    path: [
+      'index'
+    ],
+    body: [
+      'field',
+      'size',
+      'timeout',
+      'case_insensitive',
+      'index_filter',
+      'string',
+      'search_after'
+    ],
+    query: []
+  }
+}
+
+/**
+  * Get terms in an index. Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios. > info > The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
+  * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum | Elasticsearch API documentation}
+  */
+export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.TermsEnumResponse>
+export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.TermsEnumResponse, unknown>>
+export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise<T.TermsEnumResponse>
+export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise<any> {
+  const {
+    path: acceptedPath,
+    body: acceptedBody,
+    query: acceptedQuery
+  } = acceptedParams.terms_enum
+
+  const userQuery = params?.querystring
+  const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
+
+  let body: Record<string, any> | string | undefined
+  const userBody = params?.body
+  if (userBody != null) {
+    if (typeof userBody === 'string') {
+      body = userBody
+    } else {
+      body = { ...userBody }
+    }
+  }
+
+  for (const key in params) {
+    if (acceptedBody.includes(key)) {
+      body = body ?? {}
+      // @ts-expect-error
+      body[key] = params[key]
+    } else if (acceptedPath.includes(key)) {
+      continue
+    } else if (key !== 'body' && key !== 'querystring') {
+      if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+        // @ts-expect-error
+        querystring[key] = params[key]
+      } else {
+        body = body ?? {}
+        // @ts-expect-error
+        body[key] = params[key]
+      }
+    }
+  }
+
+  const method = body != null ? 'POST' : 'GET'
+  const path = `/${encodeURIComponent(params.index.toString())}/_terms_enum`
+  const meta: TransportRequestMetadata = {
+    name: 'terms_enum',
+    pathParts: {
+      index: params.index
+    },
+    acceptedParams: [
+      'index',
+      'field',
+      'size',
+      'timeout',
+      'case_insensitive',
+      'index_filter',
+      'string',
+      'search_after'
+    ]
+  }
+  return await this.transport.request({ path, method, querystring, body, meta }, options)
+}
diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts
new file mode 100644
index 000000000..b38dd64f2
--- /dev/null
+++ b/src/api/api/termvectors.ts
@@ -0,0 +1,157 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/* eslint-disable import/export */
+/* eslint-disable @typescript-eslint/no-misused-new */
+/* eslint-disable @typescript-eslint/no-extraneous-class */
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+// This file was automatically generated by elastic/elastic-client-generator-js
+// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file,
+// and elastic/elastic-client-generator-js to regenerate this file again.
+
+import {
+  Transport,
+  TransportRequestMetadata,
+  TransportRequestOptions,
+  TransportRequestOptionsWithMeta,
+  TransportRequestOptionsWithOutMeta,
+  TransportResult
+} from '@elastic/transport'
+import * as T from '../types'
+
+interface That {
+  transport: Transport
+}
+
+const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']
+
+const acceptedParams: Record<string, { path: string[], body: string[], query: string[] }> = {
+  termvectors: {
+    path: [
+      'index',
+      'id'
+    ],
+    body: [
+      'doc',
+      'filter',
+      'per_field_analyzer',
+      'fields',
+      'field_statistics',
+      'offsets',
+      'payloads',
+      'positions',
+      'term_statistics',
+      'routing',
+      'version',
+      'version_type'
+    ],
+    query: [
+      'fields',
+      'field_statistics',
+      'offsets',
+      'payloads',
+      'positions',
+      'preference',
+      'realtime',
+      'routing',
+      'term_statistics',
+      'version',
+      'version_type'
+    ]
+  }
+}
+
+/**
+  * Get term vector information. Get information and statistics about terms in the fields of a particular document. You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. For example: ``` GET /my-index-000001/_termvectors/1?fields=message ``` Fields can be specified using wildcards, similar to the multi match query. Term vectors are real-time by default, not near real-time. This can be changed by setting the `realtime` parameter to `false`. You can request three types of values: _term information_, _term statistics_, and _field statistics_. By default, all term information and field statistics are returned for all fields but term statistics are excluded.
**Term information** * term frequency in the field (always returned) * term positions (`positions: true`) * start and end offsets (`offsets: true`) * term payloads (`payloads: true`), as base64 encoded bytes If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. > warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. Refer to the linked documentation for detailed examples of how to use this API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors | Elasticsearch API documentation} + */ +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise +export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.termvectors + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.index != null && params.id != null) { + method = body != null ? 'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_termvectors/${encodeURIComponent(params.id.toString())}` + } else { + method = body != null ? 
'POST' : 'GET' + path = `/${encodeURIComponent(params.index.toString())}/_termvectors` + } + const meta: TransportRequestMetadata = { + name: 'termvectors', + pathParts: { + index: params.index, + id: params.id + }, + acceptedParams: [ + 'index', + 'id', + 'doc', + 'filter', + 'per_field_analyzer', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'term_statistics', + 'routing', + 'version', + 'version_type', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts new file mode 100644 index 000000000..318cadc00 --- /dev/null +++ b/src/api/api/text_structure.ts @@ -0,0 +1,360 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class TextStructure { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'text_structure.find_field_structure': { + path: [], + body: [], + query: [ + 'column_names', + 'delimiter', + 'documents_to_sample', + 'ecs_compatibility', + 'explain', + 'field', + 'format', + 'grok_pattern', + 'index', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.find_message_structure': { + path: [], + body: [ + 'messages' + ], + query: [ + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.find_structure': { + path: [], + body: [ + 'text_files' + ], + query: [ + 'charset', + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'has_header_row', + 'line_merge_size_limit', + 'lines_to_sample', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.test_grok_pattern': { + path: [], + body: [ + 'grok_pattern', + 'text' + ], + query: [ + 'ecs_compatibility' + ] + } + } + } + + /** + * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. 
For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure | Elasticsearch API documentation} + */ + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise + async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['text_structure.find_field_structure'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_text_structure/find_field_structure' + const meta: TransportRequestMetadata = { + name: 'text_structure.find_field_structure', + acceptedParams: [ + 'column_names', + 'delimiter', + 'documents_to_sample', + 'ecs_compatibility', + 'explain', + 'field', + 'format', + 'grok_pattern', + 'index', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. 
* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure | Elasticsearch API documentation} + */ + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise + async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['text_structure.find_message_structure'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_text_structure/find_message_structure' + const meta: TransportRequestMetadata = { + name: 'text_structure.find_message_structure', + acceptedParams: [ + 'messages', + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. The response from the API contains: * A couple of messages from the beginning of the text.
* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure | Elasticsearch API documentation} + */ + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise + async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['text_structure.find_structure'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = '/_text_structure/find_structure' + const meta: TransportRequestMetadata = { + name: 'text_structure.find_structure', + acceptedParams: [ + 'text_files', + 'charset', + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'has_header_row', + 'line_merge_size_limit', + 'lines_to_sample', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + } + return await this.transport.request({ path, method, querystring, bulkBody: body, meta }, options) + } + + /** + * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. 
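The `find_structure` implementation above posts `text_files` as NDJSON (note the `bulkBody` key in the transport request). A usage sketch under that assumption, with made-up sample lines:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Each array element becomes one line of the posted text.
const result = await client.textStructure.findStructure({
  text_files: [
    { '@timestamp': '2024-01-01T00:00:00Z', message: 'user logged in' },
    { '@timestamp': '2024-01-01T00:00:05Z', message: 'user logged out' }
  ],
  lines_to_sample: 1000
})
// Suggested mappings for an index that could ingest this text.
console.log(result.mappings)
```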
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern | Elasticsearch API documentation} + */ + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise + async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['text_structure.test_grok_pattern'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_text_structure/test_grok_pattern' + const meta: TransportRequestMetadata = { + name: 'text_structure.test_grok_pattern', + acceptedParams: [ + 'grok_pattern', + 'text', + 'ecs_compatibility' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts new file mode 100644 index 000000000..249a62264 --- /dev/null +++ b/src/api/api/transform.ts @@ -0,0 +1,921 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
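Before moving on to the transform namespace, a quick sketch of the `test_grok_pattern` endpoint defined at the end of `text_structure.ts`; the pattern and sample line are invented:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const res = await client.textStructure.testGrokPattern({
  grok_pattern: '%{TIMESTAMP_ISO8601:ts} %{LOGLEVEL:level} %{GREEDYDATA:msg}',
  text: ['2024-01-01T00:00:00Z INFO starting up']
})
// One entry per input line: whether it matched, plus field offsets/lengths.
console.log(res.matches)
```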
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Transform { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'transform.delete_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'force', + 'delete_dest_index', + 'timeout' + ] + }, + 'transform.get_node_stats': { + path: [], + body: [], + query: [] + }, + 'transform.get_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + }, + 'transform.get_transform_stats': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'timeout' + ] + }, + 'transform.preview_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + 'pivot', + 'source', + 'settings', + 'sync', + 'retention_policy', + 'latest' + ], + query: [ + 'timeout' + ] + }, + 'transform.put_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + 'latest', + '_meta', + 'pivot', + 'retention_policy', + 'settings', + 'source', + 'sync' + ], + query: [ + 'defer_validation', + 'timeout' + ] + }, + 'transform.reset_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'transform.schedule_now_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'transform.set_upgrade_mode': { + path: [], + body: [], + query: [ + 'enabled', + 'timeout' + ] + }, + 'transform.start_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'timeout', + 'from' + ] + }, + 'transform.stop_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'force', + 'timeout', + 'wait_for_checkpoint', + 'wait_for_completion' + ] + }, + 'transform.update_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + '_meta', + 'source', + 'settings', + 'sync', + 'retention_policy' + ], + query: [ + 'defer_validation', + 'timeout' + ] + }, + 'transform.upgrade_transforms': { + path: [], + body: [], + query: [ + 'dry_run', + 'timeout' + ] + } + } + } + + /** + * Delete a transform. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform | Elasticsearch API documentation} + */ + async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise + async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.delete_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'transform.delete_transform', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'force', + 'delete_dest_index', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves transform usage information for transform nodes + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform-node-stats.html | Elasticsearch API documentation} + */ + async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise + async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> + async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise + async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.get_node_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_transform/_node_stats' + const meta: TransportRequestMetadata = { + name: 'transform.get_node_stats', + acceptedParams: [ + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get transforms. Get configuration information for transforms. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform | Elasticsearch API documentation} + */ + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise + async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.get_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.transform_id != null) { + method = 'GET' + path = `/_transform/${encodeURIComponent(params.transform_id.toString())}` + } else { + method = 'GET' + path = '/_transform' + } + const meta: TransportRequestMetadata = { + name: 'transform.get_transform', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get transform stats. Get usage information for transforms. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats | Elasticsearch API documentation} + */ + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise + async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.get_transform_stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_stats` + const meta: TransportRequestMetadata = { + name: 'transform.get_transform_stats', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'allow_no_match', + 'from', + 'size', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. 
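As a sketch of the preview call just described (index and field names are hypothetical): no destination index is required, since the API only computes sample results and deduced mappings.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const preview = await client.transform.previewTransform({
  source: { index: 'orders' },
  pivot: {
    group_by: { customer: { terms: { field: 'customer_id' } } },
    aggregations: { total: { sum: { field: 'price' } } }
  }
})
// Up to 100 sample documents, plus generated destination-index mappings.
console.log(preview.preview, preview.generated_dest_index)
```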
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform | Elasticsearch API documentation} + */ + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise> + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise> + async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['transform.preview_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.transform_id != null) { + method = body != null ? 'POST' : 'GET' + path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_preview` + } else { + method = body != null ? 'POST' : 'GET' + path = '/_transform/_preview' + } + const meta: TransportRequestMetadata = { + name: 'transform.preview_transform', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'dest', + 'description', + 'frequency', + 'pivot', + 'source', + 'settings', + 'sync', + 'retention_policy', + 'latest', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a transform. Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. 
If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform | Elasticsearch API documentation} + */ + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise + async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['transform.put_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'transform.put_transform', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'dest', + 'description', + 'frequency', + 'latest', + '_meta', + 'pivot', + 'retention_policy', + 'settings', + 'source', + 'sync', + 'defer_validation', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Reset a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. 
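A minimal `put_transform` sketch using the pivot method, matching the body parameters listed above; all names and intervals are illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.transform.putTransform({
  transform_id: 'orders-by-customer',
  source: { index: 'orders' },
  dest: { index: 'orders-summary' },
  pivot: {
    group_by: { customer: { terms: { field: 'customer_id' } } },
    aggregations: { total: { sum: { field: 'price' } } }
  },
  // Continuous mode: poll for new data, with a delay for late arrivals.
  sync: { time: { field: '@timestamp', delay: '60s' } },
  frequency: '5m'
})
```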
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform | Elasticsearch API documentation} + */ + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise + async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.reset_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_reset` + const meta: TransportRequestMetadata = { + name: 'transform.reset_transform', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'force', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Schedule a transform to start now. Instantly run a transform to process data. If you run this API, the transform will process the new data instantly, without waiting for the configured frequency interval. After the API is called, the transform will be processed again at `now + frequency` unless the API is called again in the meantime. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform | Elasticsearch API documentation} + */ + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise + async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.schedule_now_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_schedule_now` + const meta: TransportRequestMetadata = { + name: 'transform.schedule_now_transform', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Set upgrade_mode for transform indices. Sets a cluster wide upgrade_mode setting that prepares transform indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your transform indices. In those circumstances, there must be no transforms running. You can close the transforms, do the upgrade, then open all the transforms again. Alternatively, you can use this API to temporarily halt tasks associated with the transforms and prevent new transforms from opening. You can also use this API during upgrades that do not require you to reindex your transform indices, though stopping transforms is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get transform info API. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-set-upgrade-mode | Elasticsearch API documentation} + */ + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptions): Promise + async setUpgradeMode (this: That, params?: T.TransformSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.set_upgrade_mode'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_transform/set_upgrade_mode' + const meta: TransportRequestMetadata = { + name: 'transform.set_upgrade_mode', + acceptedParams: [ + 'enabled', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. 
If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform | Elasticsearch API documentation} + */ + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise + async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.start_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_start` + const meta: TransportRequestMetadata = { + name: 'transform.start_transform', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'timeout', + 'from' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Stop transforms. Stops one or more transforms. 
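The start/stop lifecycle implied by the two endpoints above, as a short sketch (transform id carried over from the earlier example):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.transform.startTransform({ transform_id: 'orders-by-customer' })

// ...later: request a stop and block until the transform has fully halted.
await client.transform.stopTransform({
  transform_id: 'orders-by-customer',
  wait_for_completion: true,
  timeout: '30s'
})
```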
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform | Elasticsearch API documentation} + */ + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise + async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.stop_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_stop` + const meta: TransportRequestMetadata = { + name: 'transform.stop_transform', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'allow_no_match', + 'force', + 'timeout', + 'wait_for_checkpoint', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update a transform. Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform | Elasticsearch API documentation} + */ + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise + async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['transform.update_transform'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_transform/${encodeURIComponent(params.transform_id.toString())}/_update` + const meta: TransportRequestMetadata = { + name: 'transform.update_transform', + pathParts: { + transform_id: params.transform_id + }, + acceptedParams: [ + 'transform_id', + 'dest', + 'description', + 'frequency', + '_meta', + 'source', + 'settings', + 'sync', + 'retention_policy', + 'defer_validation', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Upgrade all transforms. Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue then re-run the process again. A summary is returned when the upgrade is finished. To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms | Elasticsearch API documentation} + */ + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise + async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['transform.upgrade_transforms'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_transform/_upgrade' + const meta: TransportRequestMetadata = { + name: 'transform.upgrade_transforms', + acceptedParams: [ + 'dry_run', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/update.ts b/src/api/api/update.ts new file mode 100644 index 000000000..5711f8f9f --- /dev/null +++ b/src/api/api/update.ts @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + update: { + path: [ + 'id', + 'index' + ], + body: [ + 'detect_noop', + 'doc', + 'doc_as_upsert', + 'script', + 'scripted_upsert', + '_source', + 'upsert' + ], + query: [ + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'lang', + 'refresh', + 'require_alias', + 'retry_on_conflict', + 'routing', + 'timeout', + 'wait_for_active_shards', + '_source', + '_source_excludes', + '_source_includes' + ] + } +} + +/** + * Update a document. Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. * Indexes the result. The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). For usage examples such as partial updates, upserts, and scripted updates, see the External documentation. 
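A usage sketch for the update endpoint whose description ends above: the partial document is merged into the stored `_source`, and `doc_as_upsert` makes the call create the document when it is missing (index, id, and field are hypothetical).

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

await client.update({
  index: 'articles',
  id: '1',
  doc: { views: 1 },   // partial document, merged into the existing one
  doc_as_upsert: true  // create the document if it does not exist yet
})
```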
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update | Elasticsearch API documentation} + */ +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise> +export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.update + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_update/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'update', + pathParts: { + id: params.id, + index: params.index + }, + acceptedParams: [ + 'id', + 'index', + 'detect_noop', + 'doc', + 'doc_as_upsert', + 'script', + 'scripted_upsert', + '_source', + 'upsert', + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'lang', + 'refresh', + 'require_alias', + 'retry_on_conflict', + 'routing', + 'timeout', + 'wait_for_active_shards', + '_source', + '_source_excludes', + '_source_includes' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/update_by_query.ts b/src/api/api/update_by_query.ts new file mode 100644 index 000000000..55e0aaee7 --- /dev/null +++ b/src/api/api/update_by_query.ts @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. 
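Every endpoint in this diff repeats the same key-routing loop: path parts are skipped, declared body keys go to the body, declared or common query keys go to the querystring, and anything else falls through to the body. A distilled standalone sketch of that logic (the helper name is illustrative, not client API):

```ts
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

interface AcceptedParams { path: string[], body: string[], query: string[] }

function routeParams (params: Record<string, unknown>, accepted: AcceptedParams): {
  body: Record<string, unknown>
  querystring: Record<string, unknown>
} {
  const body: Record<string, unknown> = {}
  const querystring: Record<string, unknown> = {}
  for (const key in params) {
    if (accepted.path.includes(key)) continue // path parts are interpolated into the URL
    if (accepted.body.includes(key)) {
      body[key] = params[key]
    } else if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      body[key] = params[key] // unknown keys default to the body
    }
  }
  return { body, querystring }
}
```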
+ +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + update_by_query: { + path: [ + 'index' + ], + body: [ + 'max_docs', + 'query', + 'script', + 'slice', + 'conflicts' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'pipeline', + 'preference', + 'q', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } +} + +/** + * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back. **Refreshing shards** Specifying the `refresh` parameter refreshes all shards once the request completes. This is different to the update API's `refresh` parameter, which causes only the shard that received the request to be refreshed. Unlike the update API, it does not support `wait_for`. 
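A basic `_update_by_query` sketch reflecting the parameters above, using `conflicts: 'proceed'` to count version conflicts rather than abort (index, query, and script are invented):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

const result = await client.updateByQuery({
  index: 'articles',
  conflicts: 'proceed', // count version conflicts instead of failing
  query: { term: { status: 'draft' } },
  script: { source: 'ctx._source.views = 0', lang: 'painless' }
})
console.log(result.updated, result.version_conflicts)
```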
**Running update by query asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a [task](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. **Waiting for active shards** `wait_for_active_shards` controls how many copies of a shard must be active before proceeding with the request. See [`wait_for_active_shards`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create#operation-create-wait_for_active_shards) for details. `timeout` controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the [Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk). Update by query uses scrolled searches, so you can also specify the `scroll` parameter to control how long it keeps the search context alive, for example `?scroll=10m`. The default is 5 minutes. **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is 1000, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with slices will cancel each sub-request. * Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. 
* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. Refer to the linked documentation for examples of how to update documents using the `_update_by_query` API: + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query | Elasticsearch API documentation} + */ +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise +export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.update_by_query + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/${encodeURIComponent(params.index.toString())}/_update_by_query` + const meta: TransportRequestMetadata = { + name: 'update_by_query', + pathParts: { + index: params.index + }, + acceptedParams: [ + 'index', + 'max_docs', + 'query', + 'script', + 'slice', + 'conflicts', + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'pipeline', + 'preference', + 'q', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts new file mode 100644 index 000000000..b239b5157 --- /dev/null +++ b/src/api/api/update_by_query_rethrottle.ts @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + update_by_query_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} + +/** + * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle | Elasticsearch API documentation} + */ +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise +export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.update_by_query_rethrottle + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = `/_update_by_query/${encodeURIComponent(params.task_id.toString())}/_rethrottle` + const meta: TransportRequestMetadata = { + name: 'update_by_query_rethrottle', + pathParts: { + task_id: params.task_id + }, + acceptedParams: [ + 'task_id', + 'requests_per_second' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) +} diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts new file mode 100644 index 000000000..dca5bb1eb --- /dev/null +++ b/src/api/api/watcher.ts @@ -0,0 +1,884 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +export default class Watcher { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'watcher.ack_watch': { + path: [ + 'watch_id', + 'action_id' + ], + body: [], + query: [] + }, + 'watcher.activate_watch': { + path: [ + 'watch_id' + ], + body: [], + query: [] + }, + 'watcher.deactivate_watch': { + path: [ + 'watch_id' + ], + body: [], + query: [] + }, + 'watcher.delete_watch': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'watcher.execute_watch': { + path: [ + 'id' + ], + body: [ + 'action_modes', + 'alternative_input', + 'ignore_condition', + 'record_execution', + 'simulated_actions', + 'trigger_data', + 'watch' + ], + query: [ + 'debug' + ] + }, + 'watcher.get_settings': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.get_watch': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'watcher.put_watch': { + path: [ + 'id' + ], + body: [ + 'actions', + 'condition', + 'input', + 'metadata', + 'throttle_period', + 'throttle_period_in_millis', + 'transform', + 'trigger' + ], + query: [ + 'active', + 'if_primary_term', + 'if_seq_no', + 'version' + ] + }, + 'watcher.query_watches': { + path: [], + body: [ + 'from', + 'size', + 'query', + 'sort', + 'search_after' + ], + query: [] + }, + 'watcher.start': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.stats': { + path: [ + 'metric' + ], + body: [], + query: [ + 'emit_stacktraces', + 'metric' + ] + }, + 'watcher.stop': { + path: [], + body: [], + query: 
[ + 'master_timeout' + ] + }, + 'watcher.update_settings': { + path: [], + body: [ + 'index.auto_expand_replicas', + 'index.number_of_replicas' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } + } + + /** + * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). To demonstrate how throttling works in practice and how it can be configured for individual actions within a watch, refer to External documentation. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch | Elasticsearch API documentation} + */ + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise + async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['watcher.ack_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.watch_id != null && params.action_id != null) { + method = 'PUT' + path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_ack/${encodeURIComponent(params.action_id.toString())}` + } else { + method = 'PUT' + path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_ack` + } + const meta: TransportRequestMetadata = { + name: 'watcher.ack_watch', + pathParts: { + watch_id: params.watch_id, + action_id: params.action_id + }, + acceptedParams: [ + 'watch_id', + 'action_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Activate a watch. A watch can be either active or inactive. 
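The `ackWatch` implementation above chooses between two URL forms depending on whether an action is named. A short sketch of both calls, with hypothetical watch and action names:

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection; real deployments normally need TLS and auth.
const client = new Client({ node: 'http://localhost:9200' })

// Acknowledge every action of a watch: PUT /_watcher/watch/my_watch/_ack
await client.watcher.ackWatch({ watch_id: 'my_watch' })

// Acknowledge one action only: PUT /_watcher/watch/my_watch/_ack/email_admin
await client.watcher.ackWatch({ watch_id: 'my_watch', action_id: 'email_admin' })
```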
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch | Elasticsearch API documentation} + */ + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise + async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['watcher.activate_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_activate` + const meta: TransportRequestMetadata = { + name: 'watcher.activate_watch', + pathParts: { + watch_id: params.watch_id + }, + acceptedParams: [ + 'watch_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Deactivate a watch. A watch can be either active or inactive. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch | Elasticsearch API documentation} + */ + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise + async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['watcher.deactivate_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_watcher/watch/${encodeURIComponent(params.watch_id.toString())}/_deactivate` + const meta: TransportRequestMetadata = { + name: 'watcher.deactivate_watch', + pathParts: { + watch_id: params.watch_id + }, + acceptedParams: [ + 'watch_id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. 
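A sketch of the activation lifecycle ending in deletion, again with a hypothetical `my_watch`; note that activation and deactivation address the watch as `watch_id`, while deletion addresses it as `id`:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

// Toggle the active state.
await client.watcher.deactivateWatch({ watch_id: 'my_watch' })
await client.watcher.activateWatch({ watch_id: 'my_watch' })

// Deletion must go through this API, never through a document delete
// on the `.watches` index.
const response = await client.watcher.deleteWatch({ id: 'my_watch' })
console.log(response.found)
```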
Deleting a watch does not delete any watch execution records related to this watch from the watch history. IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch | Elasticsearch API documentation} + */ + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise + async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['watcher.delete_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'watcher.delete_watch', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch. Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches. 
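As one hedged illustration of an inline execution request (the watch body and action name are hypothetical), this simulates all actions, ignores the condition, and skips the watch history record:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

const result = await client.watcher.executeWatch({
  ignore_condition: true,
  record_execution: false,
  action_modes: { _all: 'simulate' },
  watch: {
    trigger: { schedule: { interval: '10m' } },
    input: { simple: { payload: 'test' } },
    condition: { always: {} },
    actions: { log_hit: { logging: { text: 'watch {{ctx.watch_id}} fired' } } }
  }
})
console.log(result.watch_record.state)
```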
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch | Elasticsearch API documentation} + */ + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise + async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['watcher.execute_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.id != null) { + method = 'PUT' + path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}/_execute` + } else { + method = 'PUT' + path = '/_watcher/watch/_execute' + } + const meta: TransportRequestMetadata = { + name: 'watcher.execute_watch', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'action_modes', + 'alternative_input', + 'ignore_condition', + 'record_execution', + 'simulated_actions', + 'trigger_data', + 'watch', + 'debug' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings | Elasticsearch API documentation} + */ + async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise + async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['watcher.get_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_watcher/settings' + const meta: TransportRequestMetadata = { + name: 'watcher.get_settings', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get a watch. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch | Elasticsearch API documentation} + */ + async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise + async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['watcher.get_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'watcher.get_watch', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. IMPORTANT: You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. When you add a watch you can also define its initial active state by setting the *active* parameter. When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. 
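A sketch of registering a watch in the inactive state and then finding it again with `queryWatches`; the watch definition and names are hypothetical, and only `_id` and `metadata.*` are queryable:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

// Register the watch inactive first; activate it once it has been verified.
await client.watcher.putWatch({
  id: 'cluster_health_watch',
  active: false,
  trigger: { schedule: { interval: '1m' } },
  input: { simple: {} },
  condition: { always: {} },
  actions: { notify: { logging: { text: 'triggered at {{ctx.execution_time}}' } } }
})

const watches = await client.watcher.queryWatches({
  query: { term: { _id: 'cluster_health_watch' } }
})
console.log(watches.count)
```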
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch | Elasticsearch API documentation} + */ + async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise + async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['watcher.put_watch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_watcher/watch/${encodeURIComponent(params.id.toString())}` + const meta: TransportRequestMetadata = { + name: 'watcher.put_watch', + pathParts: { + id: params.id + }, + acceptedParams: [ + 'id', + 'actions', + 'condition', + 'input', + 'metadata', + 'throttle_period', + 'throttle_period_in_millis', + 'transform', + 'trigger', + 'active', + 'if_primary_term', + 'if_seq_no', + 'version' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches | Elasticsearch API documentation} + */ + async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise> + async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise + async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['watcher.query_watches'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = body != null ? 'POST' : 'GET' + const path = '/_watcher/_query/watches' + const meta: TransportRequestMetadata = { + name: 'watcher.query_watches', + acceptedParams: [ + 'from', + 'size', + 'query', + 'sort', + 'search_after' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Start the watch service. Start the Watcher service if it is not already running. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start | Elasticsearch API documentation} + */ + async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise> + async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise + async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['watcher.start'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_watcher/_start' + const meta: TransportRequestMetadata = { + name: 'watcher.start', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats | Elasticsearch API documentation} + */ + async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise + async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['watcher.stats'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.metric != null) { + method = 'GET' + path = `/_watcher/stats/${encodeURIComponent(params.metric.toString())}` + } else { + method = 'GET' + path = '/_watcher/stats' + } + const meta: TransportRequestMetadata = { + name: 'watcher.stats', + pathParts: { + metric: params.metric + }, + acceptedParams: [ + 'metric', + 'emit_stacktraces', + 'metric' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Stop the watch service. Stop the Watcher service if it is running. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop | Elasticsearch API documentation} + */ + async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise> + async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise + async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['watcher.stop'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_watcher/_stop' + const meta: TransportRequestMetadata = { + name: 'watcher.stop', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`, `index.routing.allocation.include.*` and `index.routing.allocation.require.*`. Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the Watcher shards must always be in the `data_content` tier. 
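A minimal sketch of reading and updating the allowed settings; the replica range shown is an arbitrary example value:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder

// Only the whitelisted settings are accepted; in particular,
// index.routing.allocation.include._tier_preference is rejected.
await client.watcher.updateSettings({
  'index.auto_expand_replicas': '0-4'
})

const settings = await client.watcher.getSettings()
console.log(settings)
```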
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings | Elasticsearch API documentation} + */ + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise + async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this[kAcceptedParams]['watcher.update_settings'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = '/_watcher/settings' + const meta: TransportRequestMetadata = { + name: 'watcher.update_settings', + acceptedParams: [ + 'index.auto_expand_replicas', + 'index.number_of_replicas', + 'master_timeout', + 'timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts new file mode 100644 index 000000000..1c14a1314 --- /dev/null +++ b/src/api/api/xpack.ts @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import { kAcceptedParams } from '../../client' + +interface That { + transport: Transport + [kAcceptedParams]: Record +} + +export default class Xpack { + transport: Transport + [kAcceptedParams]: Record + constructor (transport: Transport) { + this.transport = transport + this[kAcceptedParams] = { + 'xpack.info': { + path: [], + body: [], + query: [ + 'categories', + 'accept_enterprise', + 'human' + ] + }, + 'xpack.usage': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } + } + + /** + * Get information. The information provided by the API includes: * Build information including the build number and timestamp. 
* License information about the currently installed license. * Feature information for the features that are currently enabled and available under the current license. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info | Elasticsearch API documentation} + */ + async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> + async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise + async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['xpack.info'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_xpack' + const meta: TransportRequestMetadata = { + name: 'xpack.info', + acceptedParams: [ + 'categories', + 'accept_enterprise', + 'human' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack | Elasticsearch API documentation} + */ + async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> + async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise + async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = this[kAcceptedParams]['xpack.usage'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_xpack/usage' + const meta: TransportRequestMetadata = { + name: 'xpack.usage', + acceptedParams: [ + 'master_timeout' + ] + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/index.ts b/src/api/index.ts new file mode 100644 index 000000000..65ffbd45e --- /dev/null +++ b/src/api/index.ts @@ -0,0 +1,507 @@ +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import AsyncSearchApi from './api/async_search' +import AutoscalingApi from './api/autoscaling' +import bulkApi from './api/bulk' +import capabilitiesApi from './api/capabilities' +import CatApi from './api/cat' +import CcrApi from './api/ccr' +import clearScrollApi from './api/clear_scroll' +import closePointInTimeApi from './api/close_point_in_time' +import ClusterApi from './api/cluster' +import ConnectorApi from './api/connector' +import countApi from './api/count' +import createApi from './api/create' +import DanglingIndicesApi from './api/dangling_indices' +import deleteApi from './api/delete' +import deleteByQueryApi from './api/delete_by_query' +import deleteByQueryRethrottleApi from './api/delete_by_query_rethrottle' +import deleteScriptApi from './api/delete_script' +import EnrichApi from './api/enrich' +import EqlApi from './api/eql' +import EsqlApi from './api/esql' +import existsApi from './api/exists' +import existsSourceApi from './api/exists_source' +import explainApi from './api/explain' +import FeaturesApi from './api/features' +import fieldCapsApi from './api/field_caps' +import FleetApi from './api/fleet' +import getApi from './api/get' +import getScriptApi from './api/get_script' +import getScriptContextApi from './api/get_script_context' +import getScriptLanguagesApi from './api/get_script_languages' +import getSourceApi from './api/get_source' +import GraphApi from './api/graph' +import healthReportApi from './api/health_report' +import IlmApi from './api/ilm' +import indexApi from './api/index' +import IndicesApi from './api/indices' +import InferenceApi from './api/inference' +import infoApi from './api/info' +import IngestApi from './api/ingest' +import knnSearchApi from './api/knn_search' +import LicenseApi from './api/license' +import LogstashApi from './api/logstash' +import mgetApi from './api/mget' +import MigrationApi from './api/migration' +import MlApi from './api/ml' +import MonitoringApi from './api/monitoring' +import msearchApi from './api/msearch' +import msearchTemplateApi from './api/msearch_template' +import mtermvectorsApi from './api/mtermvectors' +import NodesApi from './api/nodes' +import openPointInTimeApi from './api/open_point_in_time' +import pingApi from './api/ping' +import ProfilingApi from './api/profiling' +import ProjectApi from './api/project' +import putScriptApi from './api/put_script' +import QueryRulesApi from './api/query_rules' +import rankEvalApi from './api/rank_eval' +import reindexApi from './api/reindex' +import reindexRethrottleApi from './api/reindex_rethrottle' +import renderSearchTemplateApi from './api/render_search_template' +import RollupApi from './api/rollup' +import scriptsPainlessExecuteApi from './api/scripts_painless_execute' +import scrollApi from './api/scroll' +import searchApi from './api/search' +import SearchApplicationApi from './api/search_application' +import searchMvtApi from './api/search_mvt' +import searchShardsApi from './api/search_shards' +import searchTemplateApi from './api/search_template' 
+import SearchableSnapshotsApi from './api/searchable_snapshots' +import SecurityApi from './api/security' +import ShutdownApi from './api/shutdown' +import SimulateApi from './api/simulate' +import SlmApi from './api/slm' +import SnapshotApi from './api/snapshot' +import SqlApi from './api/sql' +import SslApi from './api/ssl' +import StreamsApi from './api/streams' +import SynonymsApi from './api/synonyms' +import TasksApi from './api/tasks' +import termsEnumApi from './api/terms_enum' +import termvectorsApi from './api/termvectors' +import TextStructureApi from './api/text_structure' +import TransformApi from './api/transform' +import updateApi from './api/update' +import updateByQueryApi from './api/update_by_query' +import updateByQueryRethrottleApi from './api/update_by_query_rethrottle' +import WatcherApi from './api/watcher' +import XpackApi from './api/xpack' + +export default interface API { + new(): API + asyncSearch: AsyncSearchApi + autoscaling: AutoscalingApi + bulk: typeof bulkApi + capabilities: typeof capabilitiesApi + cat: CatApi + ccr: CcrApi + clearScroll: typeof clearScrollApi + closePointInTime: typeof closePointInTimeApi + cluster: ClusterApi + connector: ConnectorApi + count: typeof countApi + create: typeof createApi + danglingIndices: DanglingIndicesApi + delete: typeof deleteApi + deleteByQuery: typeof deleteByQueryApi + deleteByQueryRethrottle: typeof deleteByQueryRethrottleApi + deleteScript: typeof deleteScriptApi + enrich: EnrichApi + eql: EqlApi + esql: EsqlApi + exists: typeof existsApi + existsSource: typeof existsSourceApi + explain: typeof explainApi + features: FeaturesApi + fieldCaps: typeof fieldCapsApi + fleet: FleetApi + get: typeof getApi + getScript: typeof getScriptApi + getScriptContext: typeof getScriptContextApi + getScriptLanguages: typeof getScriptLanguagesApi + getSource: typeof getSourceApi + graph: GraphApi + healthReport: typeof healthReportApi + ilm: IlmApi + index: typeof indexApi + indices: IndicesApi + inference: InferenceApi + info: typeof infoApi + ingest: IngestApi + knnSearch: typeof knnSearchApi + license: LicenseApi + logstash: LogstashApi + mget: typeof mgetApi + migration: MigrationApi + ml: MlApi + monitoring: MonitoringApi + msearch: typeof msearchApi + msearchTemplate: typeof msearchTemplateApi + mtermvectors: typeof mtermvectorsApi + nodes: NodesApi + openPointInTime: typeof openPointInTimeApi + ping: typeof pingApi + profiling: ProfilingApi + project: ProjectApi + putScript: typeof putScriptApi + queryRules: QueryRulesApi + rankEval: typeof rankEvalApi + reindex: typeof reindexApi + reindexRethrottle: typeof reindexRethrottleApi + renderSearchTemplate: typeof renderSearchTemplateApi + rollup: RollupApi + scriptsPainlessExecute: typeof scriptsPainlessExecuteApi + scroll: typeof scrollApi + search: typeof searchApi + searchApplication: SearchApplicationApi + searchMvt: typeof searchMvtApi + searchShards: typeof searchShardsApi + searchTemplate: typeof searchTemplateApi + searchableSnapshots: SearchableSnapshotsApi + security: SecurityApi + shutdown: ShutdownApi + simulate: SimulateApi + slm: SlmApi + snapshot: SnapshotApi + sql: SqlApi + ssl: SslApi + streams: StreamsApi + synonyms: SynonymsApi + tasks: TasksApi + termsEnum: typeof termsEnumApi + termvectors: typeof termvectorsApi + textStructure: TextStructureApi + transform: TransformApi + update: typeof updateApi + updateByQuery: typeof updateByQueryApi + updateByQueryRethrottle: typeof updateByQueryRethrottleApi + watcher: WatcherApi + xpack: XpackApi +} + +const 
kAsyncSearch = Symbol('AsyncSearch') +const kAutoscaling = Symbol('Autoscaling') +const kCat = Symbol('Cat') +const kCcr = Symbol('Ccr') +const kCluster = Symbol('Cluster') +const kConnector = Symbol('Connector') +const kDanglingIndices = Symbol('DanglingIndices') +const kEnrich = Symbol('Enrich') +const kEql = Symbol('Eql') +const kEsql = Symbol('Esql') +const kFeatures = Symbol('Features') +const kFleet = Symbol('Fleet') +const kGraph = Symbol('Graph') +const kIlm = Symbol('Ilm') +const kIndices = Symbol('Indices') +const kInference = Symbol('Inference') +const kIngest = Symbol('Ingest') +const kLicense = Symbol('License') +const kLogstash = Symbol('Logstash') +const kMigration = Symbol('Migration') +const kMl = Symbol('Ml') +const kMonitoring = Symbol('Monitoring') +const kNodes = Symbol('Nodes') +const kProfiling = Symbol('Profiling') +const kProject = Symbol('Project') +const kQueryRules = Symbol('QueryRules') +const kRollup = Symbol('Rollup') +const kSearchApplication = Symbol('SearchApplication') +const kSearchableSnapshots = Symbol('SearchableSnapshots') +const kSecurity = Symbol('Security') +const kShutdown = Symbol('Shutdown') +const kSimulate = Symbol('Simulate') +const kSlm = Symbol('Slm') +const kSnapshot = Symbol('Snapshot') +const kSql = Symbol('Sql') +const kSsl = Symbol('Ssl') +const kStreams = Symbol('Streams') +const kSynonyms = Symbol('Synonyms') +const kTasks = Symbol('Tasks') +const kTextStructure = Symbol('TextStructure') +const kTransform = Symbol('Transform') +const kWatcher = Symbol('Watcher') +const kXpack = Symbol('Xpack') + +export default class API { + [kAsyncSearch]: symbol | null + [kAutoscaling]: symbol | null + [kCat]: symbol | null + [kCcr]: symbol | null + [kCluster]: symbol | null + [kConnector]: symbol | null + [kDanglingIndices]: symbol | null + [kEnrich]: symbol | null + [kEql]: symbol | null + [kEsql]: symbol | null + [kFeatures]: symbol | null + [kFleet]: symbol | null + [kGraph]: symbol | null + [kIlm]: symbol | null + [kIndices]: symbol | null + [kInference]: symbol | null + [kIngest]: symbol | null + [kLicense]: symbol | null + [kLogstash]: symbol | null + [kMigration]: symbol | null + [kMl]: symbol | null + [kMonitoring]: symbol | null + [kNodes]: symbol | null + [kProfiling]: symbol | null + [kProject]: symbol | null + [kQueryRules]: symbol | null + [kRollup]: symbol | null + [kSearchApplication]: symbol | null + [kSearchableSnapshots]: symbol | null + [kSecurity]: symbol | null + [kShutdown]: symbol | null + [kSimulate]: symbol | null + [kSlm]: symbol | null + [kSnapshot]: symbol | null + [kSql]: symbol | null + [kSsl]: symbol | null + [kStreams]: symbol | null + [kSynonyms]: symbol | null + [kTasks]: symbol | null + [kTextStructure]: symbol | null + [kTransform]: symbol | null + [kWatcher]: symbol | null + [kXpack]: symbol | null + constructor () { + this[kAsyncSearch] = null + this[kAutoscaling] = null + this[kCat] = null + this[kCcr] = null + this[kCluster] = null + this[kConnector] = null + this[kDanglingIndices] = null + this[kEnrich] = null + this[kEql] = null + this[kEsql] = null + this[kFeatures] = null + this[kFleet] = null + this[kGraph] = null + this[kIlm] = null + this[kIndices] = null + this[kInference] = null + this[kIngest] = null + this[kLicense] = null + this[kLogstash] = null + this[kMigration] = null + this[kMl] = null + this[kMonitoring] = null + this[kNodes] = null + this[kProfiling] = null + this[kProject] = null + this[kQueryRules] = null + this[kRollup] = null + this[kSearchApplication] = null + 
this[kSearchableSnapshots] = null + this[kSecurity] = null + this[kShutdown] = null + this[kSimulate] = null + this[kSlm] = null + this[kSnapshot] = null + this[kSql] = null + this[kSsl] = null + this[kStreams] = null + this[kSynonyms] = null + this[kTasks] = null + this[kTextStructure] = null + this[kTransform] = null + this[kWatcher] = null + this[kXpack] = null + } +} + +API.prototype.bulk = bulkApi +API.prototype.capabilities = capabilitiesApi +API.prototype.clearScroll = clearScrollApi +API.prototype.closePointInTime = closePointInTimeApi +API.prototype.count = countApi +API.prototype.create = createApi +API.prototype.delete = deleteApi +API.prototype.deleteByQuery = deleteByQueryApi +API.prototype.deleteByQueryRethrottle = deleteByQueryRethrottleApi +API.prototype.deleteScript = deleteScriptApi +API.prototype.exists = existsApi +API.prototype.existsSource = existsSourceApi +API.prototype.explain = explainApi +API.prototype.fieldCaps = fieldCapsApi +API.prototype.get = getApi +API.prototype.getScript = getScriptApi +API.prototype.getScriptContext = getScriptContextApi +API.prototype.getScriptLanguages = getScriptLanguagesApi +API.prototype.getSource = getSourceApi +API.prototype.healthReport = healthReportApi +API.prototype.index = indexApi +API.prototype.info = infoApi +API.prototype.knnSearch = knnSearchApi +API.prototype.mget = mgetApi +API.prototype.msearch = msearchApi +API.prototype.msearchTemplate = msearchTemplateApi +API.prototype.mtermvectors = mtermvectorsApi +API.prototype.openPointInTime = openPointInTimeApi +API.prototype.ping = pingApi +API.prototype.putScript = putScriptApi +API.prototype.rankEval = rankEvalApi +API.prototype.reindex = reindexApi +API.prototype.reindexRethrottle = reindexRethrottleApi +API.prototype.renderSearchTemplate = renderSearchTemplateApi +API.prototype.scriptsPainlessExecute = scriptsPainlessExecuteApi +API.prototype.scroll = scrollApi +API.prototype.search = searchApi +API.prototype.searchMvt = searchMvtApi +API.prototype.searchShards = searchShardsApi +API.prototype.searchTemplate = searchTemplateApi +API.prototype.termsEnum = termsEnumApi +API.prototype.termvectors = termvectorsApi +API.prototype.update = updateApi +API.prototype.updateByQuery = updateByQueryApi +API.prototype.updateByQueryRethrottle = updateByQueryRethrottleApi + +Object.defineProperties(API.prototype, { + asyncSearch: { + get () { return this[kAsyncSearch] === null ? (this[kAsyncSearch] = new AsyncSearchApi(this.transport)) : this[kAsyncSearch] } + }, + autoscaling: { + get () { return this[kAutoscaling] === null ? (this[kAutoscaling] = new AutoscalingApi(this.transport)) : this[kAutoscaling] } + }, + cat: { + get () { return this[kCat] === null ? (this[kCat] = new CatApi(this.transport)) : this[kCat] } + }, + ccr: { + get () { return this[kCcr] === null ? (this[kCcr] = new CcrApi(this.transport)) : this[kCcr] } + }, + cluster: { + get () { return this[kCluster] === null ? (this[kCluster] = new ClusterApi(this.transport)) : this[kCluster] } + }, + connector: { + get () { return this[kConnector] === null ? (this[kConnector] = new ConnectorApi(this.transport)) : this[kConnector] } + }, + danglingIndices: { + get () { return this[kDanglingIndices] === null ? (this[kDanglingIndices] = new DanglingIndicesApi(this.transport)) : this[kDanglingIndices] } + }, + enrich: { + get () { return this[kEnrich] === null ? (this[kEnrich] = new EnrichApi(this.transport)) : this[kEnrich] } + }, + eql: { + get () { return this[kEql] === null ? 
(this[kEql] = new EqlApi(this.transport)) : this[kEql] } + }, + esql: { + get () { return this[kEsql] === null ? (this[kEsql] = new EsqlApi(this.transport)) : this[kEsql] } + }, + features: { + get () { return this[kFeatures] === null ? (this[kFeatures] = new FeaturesApi(this.transport)) : this[kFeatures] } + }, + fleet: { + get () { return this[kFleet] === null ? (this[kFleet] = new FleetApi(this.transport)) : this[kFleet] } + }, + graph: { + get () { return this[kGraph] === null ? (this[kGraph] = new GraphApi(this.transport)) : this[kGraph] } + }, + ilm: { + get () { return this[kIlm] === null ? (this[kIlm] = new IlmApi(this.transport)) : this[kIlm] } + }, + indices: { + get () { return this[kIndices] === null ? (this[kIndices] = new IndicesApi(this.transport)) : this[kIndices] } + }, + inference: { + get () { return this[kInference] === null ? (this[kInference] = new InferenceApi(this.transport)) : this[kInference] } + }, + ingest: { + get () { return this[kIngest] === null ? (this[kIngest] = new IngestApi(this.transport)) : this[kIngest] } + }, + license: { + get () { return this[kLicense] === null ? (this[kLicense] = new LicenseApi(this.transport)) : this[kLicense] } + }, + logstash: { + get () { return this[kLogstash] === null ? (this[kLogstash] = new LogstashApi(this.transport)) : this[kLogstash] } + }, + migration: { + get () { return this[kMigration] === null ? (this[kMigration] = new MigrationApi(this.transport)) : this[kMigration] } + }, + ml: { + get () { return this[kMl] === null ? (this[kMl] = new MlApi(this.transport)) : this[kMl] } + }, + monitoring: { + get () { return this[kMonitoring] === null ? (this[kMonitoring] = new MonitoringApi(this.transport)) : this[kMonitoring] } + }, + nodes: { + get () { return this[kNodes] === null ? (this[kNodes] = new NodesApi(this.transport)) : this[kNodes] } + }, + profiling: { + get () { return this[kProfiling] === null ? (this[kProfiling] = new ProfilingApi(this.transport)) : this[kProfiling] } + }, + project: { + get () { return this[kProject] === null ? (this[kProject] = new ProjectApi(this.transport)) : this[kProject] } + }, + queryRules: { + get () { return this[kQueryRules] === null ? (this[kQueryRules] = new QueryRulesApi(this.transport)) : this[kQueryRules] } + }, + rollup: { + get () { return this[kRollup] === null ? (this[kRollup] = new RollupApi(this.transport)) : this[kRollup] } + }, + searchApplication: { + get () { return this[kSearchApplication] === null ? (this[kSearchApplication] = new SearchApplicationApi(this.transport)) : this[kSearchApplication] } + }, + searchableSnapshots: { + get () { return this[kSearchableSnapshots] === null ? (this[kSearchableSnapshots] = new SearchableSnapshotsApi(this.transport)) : this[kSearchableSnapshots] } + }, + security: { + get () { return this[kSecurity] === null ? (this[kSecurity] = new SecurityApi(this.transport)) : this[kSecurity] } + }, + shutdown: { + get () { return this[kShutdown] === null ? (this[kShutdown] = new ShutdownApi(this.transport)) : this[kShutdown] } + }, + simulate: { + get () { return this[kSimulate] === null ? (this[kSimulate] = new SimulateApi(this.transport)) : this[kSimulate] } + }, + slm: { + get () { return this[kSlm] === null ? (this[kSlm] = new SlmApi(this.transport)) : this[kSlm] } + }, + snapshot: { + get () { return this[kSnapshot] === null ? (this[kSnapshot] = new SnapshotApi(this.transport)) : this[kSnapshot] } + }, + sql: { + get () { return this[kSql] === null ? 
(this[kSql] = new SqlApi(this.transport)) : this[kSql] } + }, + ssl: { + get () { return this[kSsl] === null ? (this[kSsl] = new SslApi(this.transport)) : this[kSsl] } + }, + streams: { + get () { return this[kStreams] === null ? (this[kStreams] = new StreamsApi(this.transport)) : this[kStreams] } + }, + synonyms: { + get () { return this[kSynonyms] === null ? (this[kSynonyms] = new SynonymsApi(this.transport)) : this[kSynonyms] } + }, + tasks: { + get () { return this[kTasks] === null ? (this[kTasks] = new TasksApi(this.transport)) : this[kTasks] } + }, + textStructure: { + get () { return this[kTextStructure] === null ? (this[kTextStructure] = new TextStructureApi(this.transport)) : this[kTextStructure] } + }, + transform: { + get () { return this[kTransform] === null ? (this[kTransform] = new TransformApi(this.transport)) : this[kTransform] } + }, + watcher: { + get () { return this[kWatcher] === null ? (this[kWatcher] = new WatcherApi(this.transport)) : this[kWatcher] } + }, + xpack: { + get () { return this[kXpack] === null ? (this[kXpack] = new XpackApi(this.transport)) : this[kXpack] } + } +}) diff --git a/src/api/types.ts b/src/api/types.ts new file mode 100644 index 000000000..a5657a4f8 --- /dev/null +++ b/src/api/types.ts @@ -0,0 +1,40378 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable @typescript-eslint/array-type */ +/* eslint-disable @typescript-eslint/no-empty-interface */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +/** + * We are still working on this type; it will arrive soon. + * If it's critical for you, please open an issue. + * https://github.com/elastic/elasticsearch-js + */ +export type TODO = Record<string, any> + +export interface BulkCreateOperation extends BulkWriteOperation { +} + +export interface BulkDeleteOperation extends BulkOperationBase { +} + +export type BulkFailureStoreStatus = 'not_applicable_or_unknown' | 'used' | 'not_enabled' | 'failed' + +export interface BulkIndexOperation extends BulkWriteOperation { +} + +export interface BulkOperationBase { + /** The document ID. */ + _id?: Id + /** The name of the index or index alias to perform the action on. */ + _index?: IndexName + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + if_primary_term?: long + if_seq_no?: SequenceNumber + version?: VersionNumber + version_type?: VersionType +} + +export interface BulkOperationContainer { + /** Index the specified document. + * If the document exists, it replaces the document and increments the version. + * The following line must contain the source data to be indexed. */ + index?: BulkIndexOperation + /** Index the specified document if it does not already exist. + * The following line must contain the source data to be indexed. */ + create?: BulkCreateOperation + /** Perform a partial document update. + * The following line must contain the partial document and update options. */ + update?: BulkUpdateOperation + /** Remove the specified document from the index. */ + delete?: BulkDeleteOperation +} + +export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' + +export interface BulkRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase { + /** The name of the data stream, index, or index alias to perform bulk actions on. */ + index?: IndexName + /** If `true`, the document source is included in the error message in case of parsing errors.
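+ *
+ * For example, a minimal usage sketch (illustrative only, not part of the generated spec;
+ * the index name is hypothetical):
+ *
+ * ```ts
+ * import { Client } from '@elastic/elasticsearch'
+ * const client = new Client({ node: 'http://localhost:9200' })
+ * const resp = await client.bulk({
+ *   include_source_on_error: true,
+ *   operations: [
+ *     { index: { _index: 'my-index' } },
+ *     { title: 'Document one' }
+ *   ]
+ * })
+ * if (resp.errors) console.warn('one or more bulk operations failed')
+ * ```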
*/ + include_source_on_error?: boolean + /** If `true`, the response will include the ingest pipelines that were run for each index or create. */ + list_executed_pipelines?: boolean + /** The pipeline identifier to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ + pipeline?: string + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, wait for a refresh to make this operation visible to search. + * If `false`, do nothing with refreshes. + * Valid values: `true`, `false`, `wait_for`. */ + refresh?: Refresh + /** A custom value that is used to route operations to a specific shard. */ + routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. */ + _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ + _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ + _source_includes?: Fields + /** The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. + * The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ + timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default is `1`, which waits for each primary shard to be active. */ + wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request's actions must target an index alias. */ + require_alias?: boolean + /** If `true`, the request's actions must target a data stream (existing or to be created). */ + require_data_stream?: boolean + operations?: (BulkOperationContainer | BulkUpdateAction<TDocument, TPartialDocument> | TDocument)[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, include_source_on_error?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never } + /** All values in `querystring` will be added to the request querystring.
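+ *
+ * For instance (a sketch reusing the `client` from the earlier example; `pretty` is
+ * just an illustrative Elasticsearch query parameter not modeled on this interface):
+ *
+ * ```ts
+ * await client.bulk({
+ *   operations: [{ delete: { _index: 'my-index', _id: '1' } }],
+ *   querystring: { pretty: 'true' }
+ * })
+ * ```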
*/ + querystring?: { [key: string]: any } & { index?: never, include_source_on_error?: never, list_executed_pipelines?: never, pipeline?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, timeout?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, operations?: never } +} + +export interface BulkResponse { + /** If `true`, one or more of the operations in the bulk request did not complete successfully. */ + errors: boolean + /** The result of each operation in the bulk request, in the order they were submitted. */ + items: Partial<Record<BulkOperationType, BulkResponseItem>>[] + /** The length of time, in milliseconds, it took to process the bulk request. */ + took: long + ingest_took?: long +} + +export interface BulkResponseItem { + /** The document ID associated with the operation. */ + _id?: string | null + /** The name of the index associated with the operation. + * If the operation targeted a data stream, this is the backing index into which the document was written. */ + _index: string + /** The HTTP status code returned for the operation. */ + status: integer + failure_store?: BulkFailureStoreStatus + /** Additional information about the failed operation. + * The property is returned only for failed operations. */ + error?: ErrorCause + /** The primary term assigned to the document for the operation. + * This property is returned only for successful operations. */ + _primary_term?: long + /** The result of the operation. + * Successful values are `created`, `deleted`, and `updated`. */ + result?: string + /** The sequence number assigned to the document for the operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ + _seq_no?: SequenceNumber + /** Shard information for the operation. */ + _shards?: ShardStatistics + /** The document version associated with the operation. + * The document version is incremented each time the document is updated. + * This property is returned only for successful actions. */ + _version?: VersionNumber + forced_refresh?: boolean + get?: InlineGet<Record<string, any>> +} + +export interface BulkUpdateAction<TDocument = unknown, TPartialDocument = unknown> { + /** If true, the `result` in the response is set to 'noop' when no changes to the document occur. */ + detect_noop?: boolean + /** A partial update to an existing document. */ + doc?: TPartialDocument + /** Set to `true` to use the contents of `doc` as the value of `upsert`. */ + doc_as_upsert?: boolean + /** The script to run to update the document. */ + script?: Script | ScriptSource + /** Set to `true` to run the script whether or not the document exists. */ + scripted_upsert?: boolean + /** If `false`, source retrieval is turned off. + * You can also specify a comma-separated list of the fields you want to retrieve. */ + _source?: SearchSourceConfig + /** If the document does not already exist, the contents of `upsert` are inserted as a new document. + * If the document exists, the `script` is run. */ + upsert?: TDocument +} + +export interface BulkUpdateOperation extends BulkOperationBase { + /** If `true`, the request's actions must target an index alias. */ + require_alias?: boolean + /** The number of times an update should be retried in the case of a version conflict. */ + retry_on_conflict?: integer +} + +export interface BulkWriteOperation extends BulkOperationBase { + /** A map from the full name of fields to the name of dynamic templates. + * It defaults to an empty map.
+ * If a name matches a dynamic template, that template will be applied regardless of other match predicates defined in the template. + * If a field is already defined in the mapping, then this parameter won't be used. */ + dynamic_templates?: Record<string, string> + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ + pipeline?: string + /** If `true`, the request's actions must target an index alias. */ + require_alias?: boolean +} + +export interface ClearScrollRequest extends RequestBase { + /** A comma-separated list of scroll IDs to clear. + * To clear all scroll IDs, use `_all`. + * IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. */ + scroll_id?: ScrollIds + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { scroll_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { scroll_id?: never } +} + +export interface ClearScrollResponse { + /** If `true`, the request succeeded. + * This does not indicate whether any scrolling search requests were cleared. */ + succeeded: boolean + /** The number of scrolling search requests cleared. */ + num_freed: integer +} + +export interface ClosePointInTimeRequest extends RequestBase { + /** The ID of the point-in-time. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface ClosePointInTimeResponse { + /** If `true`, all search contexts associated with the point-in-time ID were successfully closed. */ + succeeded: boolean + /** The number of search contexts that were successfully closed. */ + num_freed: integer +} + +export interface CountRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ + analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ + analyze_wildcard?: boolean + /** The default operator for query string query: `and` or `or`. + * This parameter can be used only when the `q` query string parameter is specified. */ + default_operator?: QueryDslOperator + /** The field to use as a default when no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified.
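+ *
+ * For example (a sketch with hypothetical names), counting with a Lucene query
+ * string and a default field, assuming a connected `client`:
+ *
+ * ```ts
+ * const resp = await client.count({ index: 'my-index', q: 'smith', df: 'author' })
+ * console.log(resp.count)
+ * ```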
*/ + df?: string + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded, or aliased indices are ignored when frozen. */ + ignore_throttled?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ + lenient?: boolean + /** The minimum `_score` value that documents must have to be included in the result. */ + min_score?: double + /** The node or shard the operation should be performed on. + * By default, it is random. */ + preference?: string + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ + terminate_after?: long + /** The query in Lucene query string syntax. This parameter cannot be used with a request body. */ + q?: string + /** Defines the search query using Query DSL. A request body query cannot be used + * with the `q` query string parameter. */ + query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, project_routing?: never, routing?: never, terminate_after?: never, q?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, project_routing?: never, routing?: never, terminate_after?: never, q?: never, query?: never } +} + +export interface CountResponse { + count: long + _shards: ShardStatistics +} + +export interface CreateRequest<TDocument = unknown> extends RequestBase { + /** A unique identifier for the document. + * To automatically generate a document ID, use the `POST /<target>/_doc/` request format. */ + id: Id + /** The name of the data stream or index to target.
+ * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. + * If the target doesn't exist and doesn't match a data stream template, this request creates the index. */ + index: IndexName + /** If `true`, the document source is included in the error message in case of parsing errors. */ + include_source_on_error?: boolean + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ + pipeline?: string + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, it waits for a refresh to make this operation visible to search. + * If `false`, it does nothing with refreshes. */ + refresh?: Refresh + /** If `true`, the destination must be an index alias. */ + require_alias?: boolean + /** If `true`, the request's actions must target a data stream (existing or to be created). */ + require_data_stream?: boolean + /** A custom value that is used to route operations to a specific shard. */ + routing?: Routing + /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + * Elasticsearch waits for at least the specified timeout period before failing. + * The actual wait time could be longer, particularly when multiple waits occur. + * + * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. + * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. + * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. */ + timeout?: Duration + /** The explicit version number for concurrency control. + * It must be a non-negative long number. */ + version?: VersionNumber + /** The version type. */ + version_type?: VersionType + /** The number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. */ + wait_for_active_shards?: WaitForActiveShards + document?: TDocument + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + /** All values in `querystring` will be added to the request querystring.
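+ *
+ * A typical call might look like this (sketch; the index, id, and document are
+ * hypothetical, and `client` is assumed to be connected):
+ *
+ * ```ts
+ * const resp = await client.create({
+ *   index: 'my-index',
+ *   id: '1',
+ *   document: { title: 'Document one' }
+ * })
+ * console.log(resp.result) // e.g. 'created'
+ * ```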
*/ + querystring?: { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } +} + +export type CreateResponse = WriteResponseBase + +export interface DeleteRequest extends RequestBase { + /** A unique identifier for the document. */ + id: Id + /** The name of the target index. */ + index: IndexName + /** Only perform the operation if the document has this primary term. */ + if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ + if_seq_no?: SequenceNumber + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, it waits for a refresh to make this operation visible to search. + * If `false`, it does nothing with refreshes. */ + refresh?: Refresh + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** The period to wait for active shards. + * + * This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. + * Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. + * By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. */ + timeout?: Duration + /** An explicit version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ + version?: VersionNumber + /** The version type. */ + version_type?: VersionType + /** The minimum number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. */ + wait_for_active_shards?: WaitForActiveShards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never } +} + +export type DeleteResponse = WriteResponseBase + +export interface DeleteByQueryRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. 
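+ *
+ * For example (a sketch; the wildcard pattern is hypothetical), failing fast when
+ * a pattern matches no indices:
+ *
+ * ```ts
+ * await client.deleteByQuery({
+ *   index: 'logs-*',
+ *   allow_no_indices: false,
+ *   query: { range: { '@timestamp': { lt: 'now-30d' } } }
+ * })
+ * ```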
*/ + allow_no_indices?: boolean + /** Analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ + analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ + analyze_wildcard?: boolean + /** What to do if delete by query hits version conflicts: `abort` or `proceed`. */ + conflicts?: Conflicts + /** The default operator for query string query: `and` or `or`. + * This parameter can be used only when the `q` query string parameter is specified. */ + default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ + df?: string + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** Skips the specified number of documents. */ + from?: long + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ + lenient?: boolean + /** The node or shard the operation should be performed on. + * It is random by default. */ + preference?: string + /** If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. + * This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. + * Unlike the delete API, it does not support `wait_for`. */ + refresh?: boolean + /** If `true`, the request cache is used for this request. + * Defaults to the index-level setting. */ + request_cache?: boolean + /** The throttle for this request in sub-requests per second. */ + requests_per_second?: float + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** A query in the Lucene query string syntax. */ + q?: string + /** The period to retain the search context for scrolling. */ + scroll?: Duration + /** The size of the scroll request that powers the operation. */ + scroll_size?: long + /** The explicit timeout for each search request. + * It defaults to no timeout. */ + search_timeout?: Duration + /** The type of the search operation. + * Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ + search_type?: SearchType + /** The number of slices this task should be divided into. */ + slices?: Slices + /** The specific `tag` of the request for logging and statistical purposes. */ + stats?: string[] + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. 
*/ + terminate_after?: long + /** The period each deletion request waits for active shards. */ + timeout?: Duration + /** If `true`, returns the document version as part of a hit. */ + version?: boolean + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The `timeout` value controls how long each write request waits for unavailable shards to become available. */ + wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request blocks until the operation is complete. + * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. */ + wait_for_completion?: boolean + /** The maximum number of documents to delete. */ + max_docs?: long + /** The documents to delete specified with Query DSL. */ + query?: QueryDslQueryContainer + /** Slice the request manually using the provided slice ID and total number of slices. */ + slice?: SlicedScroll + /** A sort object that specifies the order of deleted documents. */ + sort?: Sort + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never, sort?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, conflicts?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, preference?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, q?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, slice?: never, sort?: never } +} + +export interface DeleteByQueryResponse { + /** The number of scroll responses pulled back by the delete by query. */ + batches?: long + /** The number of documents that were successfully deleted. */ + deleted?: long + /** An array of failures if there were any unrecoverable errors during the process. + * If this array is not empty, the request ended abnormally because of those failures. + * Delete by query is implemented using batches and any failures cause the entire process to end but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent the operation from ending on version conflicts.
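+ *
+ * For example (sketch; names hypothetical), proceeding past conflicts and
+ * checking the outcome:
+ *
+ * ```ts
+ * const resp = await client.deleteByQuery({
+ *   index: 'my-index',
+ *   conflicts: 'proceed',
+ *   query: { term: { status: 'stale' } }
+ * })
+ * console.log(resp.deleted, resp.version_conflicts, resp.failures?.length)
+ * ```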
*/ + failures?: BulkIndexByScrollFailure[] + /** This field is always equal to zero for delete by query. + * It exists only so that delete by query, update by query, and reindex APIs return responses with the same structure. */ + noops?: long + /** The number of requests per second effectively run during the delete by query. */ + requests_per_second?: float + /** The number of retries attempted by delete by query. + * `bulk` is the number of bulk actions retried. + * `search` is the number of search actions retried. */ + retries?: Retries + slice_id?: integer + task?: TaskId + throttled?: Duration + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ + throttled_millis?: DurationValue<UnitMillis> + throttled_until?: Duration + /** This field should always be equal to zero in a `_delete_by_query` response. + * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */ + throttled_until_millis?: DurationValue<UnitMillis> + /** If `true`, some requests run during the delete by query operation timed out. */ + timed_out?: boolean + /** The number of milliseconds from start to end of the whole operation. */ + took?: DurationValue<UnitMillis> + /** The number of documents that were successfully processed. */ + total?: long + /** The number of version conflicts that the delete by query hit. */ + version_conflicts?: long +} + +export interface DeleteByQueryRethrottleRequest extends RequestBase { + /** The ID for the task. */ + task_id: TaskId + /** The throttle for this request in sub-requests per second. + * To disable throttling, set it to `-1`. */ + requests_per_second?: float + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_id?: never, requests_per_second?: never } +} + +export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase + +export interface DeleteScriptRequest extends RequestBase { + /** The identifier for the stored script or search template. */ + id: Id + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never time out. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never time out. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } +} + +export type DeleteScriptResponse = AcknowledgedResponseBase + +export interface ExistsRequest extends RequestBase { + /** A unique document identifier. */ + id: Id + /** A comma-separated list of data streams, indices, and aliases. + * It supports wildcards (`*`). */ + index: IndexName + /** The node or shard the operation should be performed on.
+ * By default, the operation is randomized between the shard replicas. + * + * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. + * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. + * This can help with "jumping values" when hitting different shards in different refresh states. + * A sample value can be something like the web session ID or the user name. */ + preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ + realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + refresh?: boolean + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ + _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ + _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ + _source_includes?: Fields + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` parameter defaults to `false`. */ + stored_fields?: Fields + /** Explicit version number for concurrency control. + * The specified version must match the current version of the document for the request to succeed. */ + version?: VersionNumber + /** The version type. */ + version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } +} + +export type ExistsResponse = boolean + +export interface ExistsSourceRequest extends RequestBase { + /** A unique identifier for the document. */ + id: Id + /** A comma-separated list of data streams, indices, and aliases. + * It supports wildcards (`*`). */ + index: IndexName + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. */ + preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. 
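+ *
+ * Both existence checks defined here resolve to a plain boolean (sketch; index
+ * and id are hypothetical):
+ *
+ * ```ts
+ * const hasDoc = await client.exists({ index: 'my-index', id: '1' })
+ * const hasSource = await client.existsSource({ index: 'my-index', id: '1' })
+ * ```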
*/ + realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + refresh?: boolean + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ + _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. */ + _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. */ + _source_includes?: Fields + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ + version?: VersionNumber + /** The version type. */ + version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } +} + +export type ExistsSourceResponse = boolean + +export interface ExplainExplanation { + description: string + details: ExplainExplanationDetail[] + value: float +} + +export interface ExplainExplanationDetail { + description: string + details?: ExplainExplanationDetail[] + value: float +} + +export interface ExplainRequest extends RequestBase { + /** The document identifier. */ + id: Id + /** Index names that are used to limit the request. + * Only a single index name can be provided to this parameter. */ + index: IndexName + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ + analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ + analyze_wildcard?: boolean + /** The default operator for query string query: `and` or `or`. + * This parameter can be used only when the `q` query string parameter is specified. */ + default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ + df?: string + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ + lenient?: boolean + /** The node or shard the operation should be performed on. + * It is random by default. */ + preference?: string + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or a list of fields to return. */ + _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response.
+ * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ + _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ + _source_includes?: Fields + /** A comma-separated list of stored fields to return in the response. */ + stored_fields?: Fields + /** The query in the Lucene query string syntax. */ + q?: string + /** Defines the search definition using the Query DSL. */ + query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, lenient?: never, preference?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, q?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, lenient?: never, preference?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, q?: never, query?: never } +} + +export interface ExplainResponse<TDocument> { + _index: IndexName + _id: Id + matched: boolean + explanation?: ExplainExplanationDetail + get?: InlineGet<TDocument> +} + +export interface FieldCapsFieldCapability { + /** Whether this field can be aggregated on all indices. */ + aggregatable: boolean + /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */ + indices?: Indices + /** Merged metadata across all indices as a map of string keys to arrays of values. A value length of 1 indicates that all indices had the same value for this key, while a length of 2 or more indicates that not all indices had the same value for this key. */ + meta?: Metadata + /** The list of indices where this field is not aggregatable, or null if all indices have the same definition for the field. */ + non_aggregatable_indices?: Indices + /** The list of indices where this field is not searchable, or null if all indices have the same definition for the field. */ + non_searchable_indices?: Indices + /** Whether this field is indexed for search on all indices. */ + searchable: boolean + type: string + /** Whether this field is registered as a metadata field. */ + metadata_field?: boolean + /** Whether this field is used as a time series dimension. + * @experimental */ + time_series_dimension?: boolean + /** Contains the metric type if this field is used as a time series metric; absent if the field is not used as a metric. + * @experimental */ + time_series_metric?: MappingTimeSeriesMetricType + /** If this list is present in the response, some indices have the field marked as a dimension while the indices in this list do not. + * @experimental */ + non_dimension_indices?: IndexName[] + /** The list of indices where this field is present if these indices + * don't have the same `time_series_metric` value for this field.
+ * @experimental */ + metric_conflicts_indices?: IndexName[] +} + +export interface FieldCapsRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. */ + index?: Indices + /** If false, the request returns an error if any wildcard expression, index alias, + * or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request + * targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. */ + allow_no_indices?: boolean + /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + /** If true, unmapped fields are included in the response. */ + include_unmapped?: boolean + /** A comma-separated list of filters to apply to the response. */ + filters?: string | string[] + /** A comma-separated list of field types to include. + * Any fields that do not match one of these types will be excluded from the results. + * It defaults to empty, meaning that all field types are returned. */ + types?: string[] + /** If false, empty fields are not included in the response. */ + include_empty_fields?: boolean + /** Specifies a subset of projects to target for the field-caps query using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting + /** A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. */ + fields?: Fields + /** Filter indices if the provided query rewrites to `match_none` on every shard. + * + * IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. + * For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. + * However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. */ + index_filter?: QueryDslQueryContainer + /** Define ad-hoc runtime fields in the request similar to the way it is done in search requests. + * These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ + runtime_mappings?: MappingRuntimeFields + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, project_routing?: never, fields?: never, index_filter?: never, runtime_mappings?: never } + /** All values in `querystring` will be added to the request querystring. 
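+ *
+ * For example (sketch; the index and field patterns are hypothetical):
+ *
+ * ```ts
+ * const resp = await client.fieldCaps({
+ *   index: 'my-index',
+ *   fields: ['title', '*_date'],
+ *   types: ['keyword', 'date']
+ * })
+ * console.log(resp.fields)
+ * ```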
*/ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, project_routing?: never, fields?: never, index_filter?: never, runtime_mappings?: never } +} + +export interface FieldCapsResponse { + /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */ + indices: Indices + fields: Record<Field, Record<string, FieldCapsFieldCapability>> +} + +export interface GetGetResult<TDocument = unknown> { + /** The name of the index the document belongs to. */ + _index: IndexName + /** If the `stored_fields` parameter is set to `true` and `found` is `true`, it contains the document fields stored in the index. */ + fields?: Record<string, any> + _ignored?: string[] + /** Indicates whether the document exists. */ + found: boolean + /** The unique identifier for the document. */ + _id: Id + /** The primary term assigned to the document for the indexing operation. */ + _primary_term?: long + /** The explicit routing, if set. */ + _routing?: string + /** The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ + _seq_no?: SequenceNumber + /** If `found` is `true`, it contains the document data formatted in JSON. + * If the `_source` parameter is set to `false` or the `stored_fields` parameter is set to `true`, it is excluded. */ + _source?: TDocument + /** The document version, which is incremented each time the document is updated. */ + _version?: VersionNumber +} + +export interface GetRequest extends RequestBase { + /** A unique document identifier. */ + id: Id + /** The name of the index that contains the document. */ + index: IndexName + /** Indicates whether the request forces synthetic `_source`. + * Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. + * Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */ + force_synthetic_source?: boolean + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. + * + * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. + * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. + * This can help with "jumping values" when hitting different shards in different refresh states. + * A sample value can be something like the web session ID or the user name. */ + preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ + realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + refresh?: boolean + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ + _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
+ * If the `_source` parameter is `false`, this parameter is ignored. */ + _source_excludes?: Fields + /** Whether vectors should be excluded from _source */ + _source_exclude_vectors?: boolean + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ + _source_includes?: Fields + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` parameter defaults to `false`. + * Only leaf fields can be retrieved with the `stored_fields` option. + * Object fields can't be returned; if specified, the request fails. */ + stored_fields?: Fields + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ + version?: VersionNumber + /** The version type. */ + version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, stored_fields?: never, version?: never, version_type?: never } +} + +export type GetResponse<TDocument = unknown> = GetGetResult<TDocument> + +export interface GetScriptRequest extends RequestBase { + /** The identifier for the stored script or search template. */ + id: Id + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never time out. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never } +} + +export interface GetScriptResponse { + _id: Id + found: boolean + script?: StoredScript +} + +export interface GetScriptContextContext { + methods: GetScriptContextContextMethod[] + name: Name +} + +export interface GetScriptContextContextMethod { + name: Name + return_type: string + params: GetScriptContextContextMethodParam[] +} + +export interface GetScriptContextContextMethodParam { + name: Name + type: string +} + +export interface GetScriptContextRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring.
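+ *
+ * As context for the stored-script types above, a script can be fetched and
+ * inspected like this (sketch; the script id is hypothetical):
+ *
+ * ```ts
+ * const resp = await client.getScript({ id: 'my-stored-script' })
+ * if (resp.found) console.log(resp.script?.source)
+ * ```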
*/ + querystring?: { [key: string]: any } +} + +export interface GetScriptContextResponse { + contexts: GetScriptContextContext[] +} + +export interface GetScriptLanguagesLanguageContext { + contexts: string[] + language: ScriptLanguage +} + +export interface GetScriptLanguagesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface GetScriptLanguagesResponse { + language_contexts: GetScriptLanguagesLanguageContext[] + types_allowed: string[] +} + +export interface GetSourceRequest extends RequestBase { + /** A unique document identifier. */ + id: Id + /** The name of the index that contains the document. */ + index: IndexName + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. */ + preference?: string + /** If `true`, the request is real-time as opposed to near-real-time. */ + realtime?: boolean + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + refresh?: boolean + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ + _source?: SearchSourceConfigParam + /** A comma-separated list of source fields to exclude from the response. */ + _source_excludes?: Fields + /** A comma-separated list of source fields to include in the response. */ + _source_includes?: Fields + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ + version?: VersionNumber + /** The version type. */ + version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring.
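+ *
+ * Unlike `get`, this call resolves directly to the document source (sketch;
+ * index and id are hypothetical):
+ *
+ * ```ts
+ * const source = await client.getSource({ index: 'my-index', id: '1' })
+ * console.log(source)
+ * ```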
+  querystring?: { [key: string]: any } & { id?: never, index?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, version?: never, version_type?: never }
+}
+
+export type GetSourceResponse<TDocument = unknown> = TDocument
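The `Get*` and `GetSource*` types above are what `client.get` and `client.getSource` accept and return in the JavaScript client. A minimal usage sketch; the node URL, index name, and the `MyDoc` shape are assumptions for illustration, not part of this diff:

import { Client } from '@elastic/elasticsearch'

// Assumed connection details and document shape, for illustration only.
interface MyDoc { title: string; views: number }
const client = new Client({ node: 'http://localhost:9200' })

// GetResponse<MyDoc> wraps the hit metadata; GetSourceResponse<MyDoc> is the bare document.
const hit = await client.get<MyDoc>({ index: 'my-index', id: '1' })
if (hit.found) console.log(hit._source?.title)
const doc = await client.getSource<MyDoc>({ index: 'my-index', id: '1' })
console.log(doc.views)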
+
+export interface HealthReportBaseIndicator {
+  status: HealthReportIndicatorHealthStatus
+  symptom: string
+  impacts?: HealthReportImpact[]
+  diagnosis?: HealthReportDiagnosis[]
+}
+
+export interface HealthReportDataStreamLifecycleDetails {
+  stagnating_backing_indices_count: integer
+  total_backing_indices_in_error: integer
+  stagnating_backing_indices?: HealthReportStagnatingBackingIndices[]
+}
+
+export interface HealthReportDataStreamLifecycleIndicator extends HealthReportBaseIndicator {
+  details?: HealthReportDataStreamLifecycleDetails
+}
+
+export interface HealthReportDiagnosis {
+  id: string
+  action: string
+  affected_resources: HealthReportDiagnosisAffectedResources
+  cause: string
+  help_url: string
+}
+
+export interface HealthReportDiagnosisAffectedResources {
+  indices?: Indices
+  nodes?: HealthReportIndicatorNode[]
+  slm_policies?: string[]
+  feature_states?: string[]
+  snapshot_repositories?: string[]
+}
+
+export interface HealthReportDiskIndicator extends HealthReportBaseIndicator {
+  details?: HealthReportDiskIndicatorDetails
+}
+
+export interface HealthReportDiskIndicatorDetails {
+  indices_with_readonly_block: long
+  nodes_with_enough_disk_space: long
+  nodes_over_high_watermark: long
+  nodes_over_flood_stage_watermark: long
+  nodes_with_unknown_disk_status: long
+}
+
+export interface HealthReportFileSettingsIndicator extends HealthReportBaseIndicator {
+  details?: HealthReportFileSettingsIndicatorDetails
+}
+
+export interface HealthReportFileSettingsIndicatorDetails {
+  failure_streak: long
+  most_recent_failure: string
+}
+
+export interface HealthReportIlmIndicator extends HealthReportBaseIndicator {
+  details?: HealthReportIlmIndicatorDetails
+}
+
+export interface HealthReportIlmIndicatorDetails {
+  ilm_status: LifecycleOperationMode
+  policies: long
+  stagnating_indices: integer
+}
+
+export interface HealthReportImpact {
+  description: string
+  id: string
+  impact_areas: HealthReportImpactArea[]
+  severity: integer
+}
+
+export type HealthReportImpactArea = 'search' | 'ingest' | 'backup' | 'deployment_management'
+
+export type HealthReportIndicatorHealthStatus = 'green' | 'yellow' | 'red' | 'unknown' | 'unavailable'
+
+export interface HealthReportIndicatorNode {
+  name: string | null
+  node_id: string | null
+}
+
+export interface HealthReportIndicators {
+  master_is_stable?: HealthReportMasterIsStableIndicator
+  shards_availability?: HealthReportShardsAvailabilityIndicator
+  disk?: HealthReportDiskIndicator
+  repository_integrity?: HealthReportRepositoryIntegrityIndicator
+  data_stream_lifecycle?: HealthReportDataStreamLifecycleIndicator
+  ilm?: HealthReportIlmIndicator
+  slm?: HealthReportSlmIndicator
+  shards_capacity?: HealthReportShardsCapacityIndicator
+  file_settings?: HealthReportFileSettingsIndicator
+}
+
+export interface HealthReportMasterIsStableIndicator extends HealthReportBaseIndicator {
+  details?: HealthReportMasterIsStableIndicatorDetails
+}
+
+export interface HealthReportMasterIsStableIndicatorClusterFormationNode {
+  name?: string
+  node_id: string
+  cluster_formation_message: string
+}
+
+export interface HealthReportMasterIsStableIndicatorDetails {
+  current_master: HealthReportIndicatorNode
+  recent_masters: HealthReportIndicatorNode[]
+  exception_fetching_history?: HealthReportMasterIsStableIndicatorExceptionFetchingHistory
+  cluster_formation?: HealthReportMasterIsStableIndicatorClusterFormationNode[]
+}
+
+export interface HealthReportMasterIsStableIndicatorExceptionFetchingHistory {
+  message: string
+  stack_trace: string
+}
+
+export interface HealthReportRepositoryIntegrityIndicator extends HealthReportBaseIndicator {
+  details?: HealthReportRepositoryIntegrityIndicatorDetails
+}
+
+export interface HealthReportRepositoryIntegrityIndicatorDetails {
+  total_repositories?: long
+  corrupted_repositories?: long
+  corrupted?: string[]
+}
+
+export interface HealthReportRequest extends RequestBase {
+  /** A feature of the cluster, as returned by the top-level health report API. */
+  feature?: string | string[]
+  /** Explicit operation timeout. */
+  timeout?: Duration
+  /** Opt-in for more information about the health of the system. */
+  verbose?: boolean
+  /** Limit the number of affected resources the health report API returns. */
+  size?: integer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { feature?: never, timeout?: never, verbose?: never, size?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { feature?: never, timeout?: never, verbose?: never, size?: never }
+}
+
+export interface HealthReportResponse {
+  cluster_name: string
+  indicators: HealthReportIndicators
+  status?: HealthReportIndicatorHealthStatus
+}
+
+export interface HealthReportShardsAvailabilityIndicator extends HealthReportBaseIndicator {
+  details?: HealthReportShardsAvailabilityIndicatorDetails
+}
+
+export interface HealthReportShardsAvailabilityIndicatorDetails {
+  creating_primaries: long
+  creating_replicas: long
+  initializing_primaries: long
+  initializing_replicas: long
+  restarting_primaries: long
+  restarting_replicas: long
+  started_primaries: long
+  started_replicas: long
+  unassigned_primaries: long
+  unassigned_replicas: long
+}
+
+export interface HealthReportShardsCapacityIndicator extends HealthReportBaseIndicator {
+  details?: HealthReportShardsCapacityIndicatorDetails
+}
+
+export interface HealthReportShardsCapacityIndicatorDetails {
+  data: HealthReportShardsCapacityIndicatorTierDetail
+  frozen: HealthReportShardsCapacityIndicatorTierDetail
+}
+
+export interface HealthReportShardsCapacityIndicatorTierDetail {
+  max_shards_in_cluster: integer
+  current_used_shards?: integer
+}
+
+export interface HealthReportSlmIndicator extends HealthReportBaseIndicator {
+  details?: HealthReportSlmIndicatorDetails
+}
+
+export interface HealthReportSlmIndicatorDetails {
+  slm_status: LifecycleOperationMode
+  policies: long
+  unhealthy_policies?: HealthReportSlmIndicatorUnhealthyPolicies
+}
+
+export interface HealthReportSlmIndicatorUnhealthyPolicies {
+  count: long
+  invocations_since_last_success?: Record<string, long>
+}
+
+export interface HealthReportStagnatingBackingIndices {
+  index_name: IndexName
+  first_occurrence_timestamp: long
+  retry_count: integer
+}
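These health report types back `client.healthReport`. A hedged sketch of reading indicator statuses, reusing the assumed `client` from the first sketch:

// HealthReportResponse: an overall status plus one entry per indicator.
const report = await client.healthReport({ verbose: true })
console.log(report.cluster_name, report.status)
const shards = report.indicators.shards_availability
if (shards && shards.status !== 'green') {
  // Each indicator carries a symptom plus optional impacts and diagnosis.
  console.log(shards.symptom, shards.impacts)
}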
+
+export interface IndexRequest<TDocument = unknown> extends RequestBase {
+  /** A unique identifier for the document.
+    * To automatically generate a document ID, use the `POST /<target>/_doc/` request format and omit this parameter. */
+  id?: Id
+  /** The name of the data stream or index to target.
+    * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream.
+    * If the target doesn't exist and doesn't match a data stream template, this request creates the index.
+    * You can check for existing targets with the resolve index API. */
+  index: IndexName
+  /** Only perform the operation if the document has this primary term. */
+  if_primary_term?: long
+  /** Only perform the operation if the document has this sequence number. */
+  if_seq_no?: SequenceNumber
+  /** If `true`, the document source is included in the error message in case of parsing errors. */
+  include_source_on_error?: boolean
+  /** Set to `create` to only index the document if it does not already exist (put if absent).
+    * If a document with the specified `_id` already exists, the indexing operation will fail.
+    * The behavior is the same as using the `<index>/_create` endpoint.
+    * If a document ID is specified, this parameter defaults to `index`.
+    * Otherwise, it defaults to `create`.
+    * If the request targets a data stream, an `op_type` of `create` is required. */
+  op_type?: OpType
+  /** The ID of the pipeline to use to preprocess incoming documents.
+    * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.
+    * If a final pipeline is configured it will always run, regardless of the value of this parameter. */
+  pipeline?: string
+  /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.
+    * If `wait_for`, it waits for a refresh to make this operation visible to search.
+    * If `false`, it does nothing with refreshes. */
+  refresh?: Refresh
+  /** A custom value that is used to route operations to a specific shard. */
+  routing?: Routing
+  /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
+    *
+    * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs.
+    * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation.
+    * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error.
+    * The actual wait time could be longer, particularly when multiple waits occur. */
+  timeout?: Duration
+  /** An explicit version number for concurrency control.
+    * It must be a non-negative long number. */
+  version?: VersionNumber
+  /** The version type. */
+  version_type?: VersionType
+  /** The number of shard copies that must be active before proceeding with the operation.
+    * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+    * The default value of `1` means it waits for each primary shard to be active. */
+  wait_for_active_shards?: WaitForActiveShards
+  /** If `true`, the destination must be an index alias. */
+  require_alias?: boolean
+  /** If `true`, the request's actions must target a data stream (existing or to be created). */
+  require_data_stream?: boolean
+  document?: TDocument
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, document?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, require_alias?: never, require_data_stream?: never, document?: never }
+}
+
+export type IndexResponse = WriteResponseBase
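In the JavaScript client the document payload of `IndexRequest<TDocument>` travels under the `document` key. A sketch reusing the assumed `client` and `MyDoc` from above; the index name and values are illustrative:

// IndexResponse is a WriteResponseBase: _id, _index, _version, result, ...
const res = await client.index<MyDoc>({
  index: 'my-index',
  id: '1',
  document: { title: 'Hello', views: 0 },
  refresh: 'wait_for' // make the write visible to the next search
})
console.log(res.result) // 'created' on first write, 'updated' afterwards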
+
+export interface InfoRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
+}
+
+export interface InfoResponse {
+  /** The responding cluster's name. */
+  cluster_name: Name
+  cluster_uuid: Uuid
+  /** The responding node's name. */
+  name: Name
+  tagline: string
+  /** The running version of Elasticsearch. */
+  version: ElasticsearchVersionInfo
+}
+
+export interface MgetMultiGetError {
+  error: ErrorCause
+  _id: Id
+  _index: IndexName
+}
+
+export interface MgetOperation {
+  /** The unique document ID. */
+  _id: Id
+  /** The index that contains the document. */
+  _index?: IndexName
+  /** The key for the primary shard the document resides on. Required if routing is used during indexing. */
+  routing?: Routing
+  /** If `false`, excludes all _source fields. */
+  _source?: SearchSourceConfig
+  /** The stored fields you want to retrieve. */
+  stored_fields?: Fields
+  version?: VersionNumber
+  version_type?: VersionType
+}
+
+export interface MgetRequest extends RequestBase {
+  /** Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. */
+  index?: IndexName
+  /** Should this request force synthetic _source?
+    * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance.
+    * Fetches with this enabled will be slower than enabling synthetic source natively in the index. */
+  force_synthetic_source?: boolean
+  /** Specifies the node or shard the operation should be performed on. Random by default. */
+  preference?: string
+  /** If `true`, the request is real-time as opposed to near-real-time. */
+  realtime?: boolean
+  /** If `true`, the request refreshes relevant shards before retrieving documents. */
+  refresh?: boolean
+  /** Custom value used to route operations to a specific shard. */
+  routing?: Routing
+  /** True or false to return the `_source` field or not, or a list of fields to return. */
+  _source?: SearchSourceConfigParam
+  /** A comma-separated list of source fields to exclude from the response.
+    * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. */
+  _source_excludes?: Fields
+  /** A comma-separated list of source fields to include in the response.
+    * If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter.
+    * If the `_source` parameter is `false`, this parameter is ignored. */
+  _source_includes?: Fields
+  /** If `true`, retrieves the document fields stored in the index rather than the document `_source`. */
+  stored_fields?: Fields
+  /** The documents you want to retrieve. Required if no index is specified in the request URI. */
+  docs?: MgetOperation[]
+  /** The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. */
+  ids?: Ids
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, docs?: never, ids?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, force_synthetic_source?: never, preference?: never, realtime?: never, refresh?: never, routing?: never, _source?: never, _source_excludes?: never, _source_includes?: never, stored_fields?: never, docs?: never, ids?: never }
+}
+
+export interface MgetResponse<TDocument = unknown> {
+  /** The response includes a docs array that contains the documents in the order specified in the request.
+    * The structure of the returned documents is similar to that returned by the get API.
+    * If there is a failure getting a particular document, the error is included in place of the document. */
+  docs: MgetResponseItem<TDocument>[]
+}
+
+export type MgetResponseItem<TDocument = unknown> = GetGetResult<TDocument> | MgetMultiGetError
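`MgetResponseItem` is a union, so callers have to narrow each entry before using it. A sketch of `client.mget` with the assumed `client`, index, and `MyDoc` shape from above:

const { docs } = await client.mget<MyDoc>({ index: 'my-index', ids: ['1', '2'] })
for (const item of docs) {
  // Only MgetMultiGetError has an `error` property, so this check narrows the union.
  if ('error' in item) console.error(item._id, item.error.reason)
  else if (item.found) console.log(item._id, item._source)
}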
+
+export interface MsearchMultiSearchItem<TDocument = unknown> extends SearchResponseBody<TDocument> {
+  status?: integer
+}
+
+export interface MsearchMultiSearchResult<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> {
+  took: long
+  responses: MsearchResponseItem<TDocument>[]
+}
+
+export interface MsearchMultisearchHeader {
+  allow_no_indices?: boolean
+  expand_wildcards?: ExpandWildcards
+  ignore_unavailable?: boolean
+  index?: Indices
+  preference?: string
+  project_routing?: ProjectRouting
+  request_cache?: boolean
+  routing?: Routing
+  search_type?: SearchType
+  ccs_minimize_roundtrips?: boolean
+  allow_partial_search_results?: boolean
+  ignore_throttled?: boolean
+}
+
+export interface MsearchRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and index aliases to search. */
+  index?: Indices
+  /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */
+  allow_no_indices?: boolean
+  /** If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. */
+  ccs_minimize_roundtrips?: boolean
+  /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */
+  expand_wildcards?: ExpandWildcards
+  /** If true, concrete, expanded or aliased indices are ignored when frozen. */
+  ignore_throttled?: boolean
+  /** If true, missing or closed indices are not included in the response. */
+  ignore_unavailable?: boolean
+  /** Indicates whether hit.matched_queries should be rendered as a map that includes
+    * the name of the matched query associated with its score (true)
+    * or as an array containing the name of the matched queries (false)
+    * This functionality reruns each named query on every hit in a search response.
+    * Typically, this adds a small overhead to a request.
+    * However, using computationally expensive named queries on a large number of hits may add significant overhead. */
+  include_named_queries_score?: boolean
+  /** Maximum number of concurrent searches the multi search API can execute.
+    * Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. */
+  max_concurrent_searches?: integer
+  /** Maximum number of concurrent shard requests that each sub-search request executes per node. */
+  max_concurrent_shard_requests?: integer
+  /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */
+  pre_filter_shard_size?: long
+  /** Specifies a subset of projects to target for a search using project metadata
+    * tags in a subset of Lucene syntax. Allowed Lucene queries: the _alias tag
+    * and a single value (possibly wildcarded). Examples:
+    * _alias:my-project
+    * _alias:_origin
+    * _alias:*pr*
+    * Supported in serverless only. */
+  project_routing?: ProjectRouting
+  /** If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. */
+  rest_total_hits_as_int?: boolean
+  /** Custom routing value used to route search operations to a specific shard. */
+  routing?: Routing
+  /** Indicates whether global term and document frequencies should be used when scoring returned documents. */
+  search_type?: SearchType
+  /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */
+  typed_keys?: boolean
+  searches?: MsearchRequestItem[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, project_routing?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, project_routing?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never }
+}
+
+export type MsearchRequestItem = MsearchMultisearchHeader | SearchSearchRequestBody
+
+export type MsearchResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = MsearchMultiSearchResult<TDocument, TAggregations>
+
+export type MsearchResponseItem<TDocument = unknown> = MsearchMultiSearchItem<TDocument> | ErrorResponseBase
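`client.msearch` takes the `searches` array typed above, alternating `MsearchMultisearchHeader` and body entries, and each response slot is either a result or an error. A sketch with assumed index names, reusing the `client` and `MyDoc` from the first example:

const multi = await client.msearch<MyDoc>({
  index: 'my-index',
  searches: [
    {}, // header: falls back to the top-level index
    { query: { match: { title: 'hello' } } },
    { index: 'other-index' }, // header targeting a second, assumed index
    { query: { match_all: {} } }
  ]
})
for (const r of multi.responses) {
  // MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase
  if ('error' in r) console.error(r.error)
  else console.log(r.hits.hits.length)
}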
+
+export interface MsearchTemplateRequest extends RequestBase {
+  /** A comma-separated list of data streams, indices, and aliases to search.
+    * It supports wildcards (`*`).
+    * To search all data streams and indices, omit this parameter or use `*`. */
+  index?: Indices
+  /** If `true`, network round-trips are minimized for cross-cluster search requests. */
+  ccs_minimize_roundtrips?: boolean
+  /** The maximum number of concurrent searches the API can run. */
+  max_concurrent_searches?: long
+  /** Specifies a subset of projects to target for the search using project
+    * metadata tags in a subset of Lucene query syntax.
+    * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded).
+    * Examples:
+    * _alias:my-project
+    * _alias:_origin
+    * _alias:*pr*
+    * Supported in serverless only. */
+  project_routing?: ProjectRouting
+  /** The type of the search operation. */
+  search_type?: SearchType
+  /** If `true`, the response returns `hits.total` as an integer.
+    * If `false`, it returns `hits.total` as an object. */
+  rest_total_hits_as_int?: boolean
+  /** If `true`, the response prefixes aggregation and suggester names with their respective types. */
+  typed_keys?: boolean
+  search_templates?: MsearchTemplateRequestItem[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, project_routing?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, ccs_minimize_roundtrips?: never, max_concurrent_searches?: never, project_routing?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, search_templates?: never }
+}
+
+export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTemplateTemplateConfig
+
+export type MsearchTemplateResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = MsearchMultiSearchResult<TDocument, TAggregations>
+
+export interface MsearchTemplateTemplateConfig {
+  /** If `true`, returns detailed information about score calculation as part of each hit. */
+  explain?: boolean
+  /** The ID of the search template to use. If no `source` is specified,
+    * this parameter is required. */
+  id?: Id
+  /** Key-value pairs used to replace Mustache variables in the template.
+    * The key is the variable name.
+    * The value is the variable value. */
+  params?: Record<string, any>
+  /** If `true`, the query execution is profiled. */
+  profile?: boolean
+  /** An inline search template. Supports the same parameters as the search API's
+    * request body. It also supports Mustache variables. If no `id` is specified, this
+    * parameter is required. */
+  source?: ScriptSource
+}
+
+export interface MtermvectorsOperation {
+  /** The ID of the document. */
+  _id?: Id
+  /** The index of the document. */
+  _index?: IndexName
+  /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */
+  doc?: any
+  /** Comma-separated list or wildcard expressions of fields to include in the statistics.
+    * Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */
+  fields?: Fields
+  /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */
+  field_statistics?: boolean
+  /** Filter terms based on their tf-idf scores. */
+  filter?: TermvectorsFilter
+  /** If `true`, the response includes term offsets. */
+  offsets?: boolean
+  /** If `true`, the response includes term payloads. */
+  payloads?: boolean
+  /** If `true`, the response includes term positions. */
+  positions?: boolean
+  /** Custom value used to route operations to a specific shard. */
+  routing?: Routing
+  /** If true, the response includes term frequency and document frequency. */
+  term_statistics?: boolean
+  /** If `true`, returns the document version as part of a hit. */
+  version?: VersionNumber
+  /** Specific version type. */
+  version_type?: VersionType
+}
+
+export interface MtermvectorsRequest extends RequestBase {
+  /** The name of the index that contains the documents. */
+  index?: IndexName
+  /** A comma-separated list or wildcard expressions of fields to include in the statistics.
+    * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */
+  fields?: Fields
+  /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */
+  field_statistics?: boolean
+  /** If `true`, the response includes term offsets. */
+  offsets?: boolean
+  /** If `true`, the response includes term payloads. */
+  payloads?: boolean
+  /** If `true`, the response includes term positions. */
+  positions?: boolean
+  /** The node or shard the operation should be performed on.
+    * It is random by default. */
+  preference?: string
+  /** If true, the request is real-time as opposed to near-real-time. */
+  realtime?: boolean
+  /** A custom value used to route operations to a specific shard. */
+  routing?: Routing
+  /** If true, the response includes term frequency and document frequency. */
+  term_statistics?: boolean
+  /** If `true`, returns the document version as part of a hit. */
+  version?: VersionNumber
+  /** The version type. */
+  version_type?: VersionType
+  /** An array of existing or artificial documents. */
+  docs?: MtermvectorsOperation[]
+  /** A simplified syntax to specify documents by their ID if they're in the same index. */
+  ids?: Id[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, docs?: never, ids?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, docs?: never, ids?: never }
+}
+
+export interface MtermvectorsResponse {
+  docs: MtermvectorsTermVectorsResult[]
+}
+
+export interface MtermvectorsTermVectorsResult {
+  _id?: Id
+  _index: IndexName
+  _version?: VersionNumber
+  took?: long
+  found?: boolean
+  term_vectors?: Record<Field, TermvectorsTermVector>
+  error?: ErrorCause
+}
+
+export interface OpenPointInTimeRequest extends RequestBase {
+  /** A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices */
+  index: Indices
+  /** Extend the length of time that the point in time persists. */
+  keep_alive: Duration
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
+  ignore_unavailable?: boolean
+  /** The node or shard the operation should be performed on.
+    * By default, it is random. */
+  preference?: string
+  /** Specifies a subset of projects to target for the PIT request using project
+    * metadata tags in a subset of Lucene query syntax.
+    * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded).
+    * Examples:
+    * _alias:my-project
+    * _alias:_origin
+    * _alias:*pr*
+    * Supported in serverless only. */
+  project_routing?: ProjectRouting
+  /** A custom value that is used to route operations to a specific shard. */
+  routing?: Routing
+  /** The type of index that wildcard patterns can match.
+    * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+    * It supports comma-separated values, such as `open,hidden`. */
+  expand_wildcards?: ExpandWildcards
+  /** Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT.
+    * If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception.
+    * If `true`, the point in time will contain all the shards that are available at the time of the request. */
+  allow_partial_search_results?: boolean
+  /** Maximum number of concurrent shard requests that each sub-search request executes per node. */
+  max_concurrent_shard_requests?: integer
+  /** Filter indices if the provided query rewrites to `match_none` on every shard. */
+  index_filter?: QueryDslQueryContainer
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, project_routing?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, project_routing?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never }
+}
+
+export interface OpenPointInTimeResponse {
+  /** Shards used to create the PIT */
+  _shards: ShardStatistics
+  id: Id
+}
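`OpenPointInTimeResponse.id` feeds the `pit` parameter of a follow-up search; `closePointInTime`, defined elsewhere in this client, releases it. A hedged sketch of the usual open/search/close cycle, reusing the assumed `client` and `MyDoc`:

const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
try {
  const page = await client.search<MyDoc>({
    pit: { id: pit.id, keep_alive: '1m' }, // no index here: the PIT pins the target
    size: 100,
    sort: ['_shard_doc']
  })
  console.log(page.hits.hits.length)
} finally {
  await client.closePointInTime({ id: pit.id })
}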
+
+export interface PingRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
+}
+
+export type PingResponse = boolean
+
+export interface PutScriptRequest extends RequestBase {
+  /** The identifier for the stored script or search template.
+    * It must be unique within the cluster. */
+  id: Id
+  /** The context in which the script or search template should run.
+    * To prevent errors, the API immediately compiles the script or template in this context. */
+  context?: Name
+  /** The period to wait for a connection to the master node.
+    * If no response is received before the timeout expires, the request fails and returns an error.
+    * It can also be set to `-1` to indicate that the request should never timeout. */
+  master_timeout?: Duration
+  /** The period to wait for a response.
+    * If no response is received before the timeout expires, the request fails and returns an error.
+    * It can also be set to `-1` to indicate that the request should never timeout. */
+  timeout?: Duration
+  /** The script or search template, its parameters, and its language. */
+  script: StoredScript
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, context?: never, master_timeout?: never, timeout?: never, script?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, context?: never, master_timeout?: never, timeout?: never, script?: never }
+}
+
+export type PutScriptResponse = AcknowledgedResponseBase
+
+export interface RankEvalDocumentRating {
+  /** The document ID. */
+  _id: Id
+  /** The document’s index. For data streams, this should be the document’s backing index. */
+  _index: IndexName
+  /** The document’s relevance with regard to this search request. */
+  rating: integer
+}
+
+export interface RankEvalRankEvalHit {
+  _id: Id
+  _index: IndexName
+  _score: double
+}
+
+export interface RankEvalRankEvalHitItem {
+  hit: RankEvalRankEvalHit
+  rating?: double | null
+}
+
+export interface RankEvalRankEvalMetric {
+  precision?: RankEvalRankEvalMetricPrecision
+  recall?: RankEvalRankEvalMetricRecall
+  mean_reciprocal_rank?: RankEvalRankEvalMetricMeanReciprocalRank
+  dcg?: RankEvalRankEvalMetricDiscountedCumulativeGain
+  expected_reciprocal_rank?: RankEvalRankEvalMetricExpectedReciprocalRank
+}
+
+export interface RankEvalRankEvalMetricBase {
+  /** Sets the maximum number of documents retrieved per query. This value will act in place of the usual size parameter in the query. */
+  k?: integer
+}
+
+export interface RankEvalRankEvalMetricDetail {
+  /** The metric_score in the details section shows the contribution of this query to the global quality metric score */
+  metric_score: double
+  /** The unrated_docs section contains an _index and _id entry for each document in the search result for this query that didn’t have a ratings value. This can be used to ask the user to supply ratings for these documents */
+  unrated_docs: RankEvalUnratedDocument[]
+  /** The hits section shows a grouping of the search results with their supplied ratings */
+  hits: RankEvalRankEvalHitItem[]
+  /** The metric_details give additional information about the calculated quality metric (e.g. how many of the retrieved documents were relevant). The content varies for each metric but allows for better interpretation of the results */
+  metric_details: Record<string, Record<string, any>>
+}
+
+export interface RankEvalRankEvalMetricDiscountedCumulativeGain extends RankEvalRankEvalMetricBase {
+  /** If set to true, this metric will calculate the Normalized DCG. */
+  normalize?: boolean
+}
+
+export interface RankEvalRankEvalMetricExpectedReciprocalRank extends RankEvalRankEvalMetricBase {
+  /** The highest relevance grade used in the user-supplied relevance judgments. */
+  maximum_relevance: integer
+}
+
+export interface RankEvalRankEvalMetricMeanReciprocalRank extends RankEvalRankEvalMetricRatingTreshold {
+}
+
+export interface RankEvalRankEvalMetricPrecision extends RankEvalRankEvalMetricRatingTreshold {
+  /** Controls how unlabeled documents in the search results are counted. If set to true, unlabeled documents are ignored and count as neither relevant nor irrelevant. If set to false (the default), they are treated as irrelevant. */
+  ignore_unlabeled?: boolean
+}
+
+export interface RankEvalRankEvalMetricRatingTreshold extends RankEvalRankEvalMetricBase {
+  /** Sets the rating threshold above which documents are considered to be "relevant". */
+  relevant_rating_threshold?: integer
+}
+
+export interface RankEvalRankEvalMetricRecall extends RankEvalRankEvalMetricRatingTreshold {
+}
+
+export interface RankEvalRankEvalQuery {
+  query: QueryDslQueryContainer
+  size?: integer
+}
+
+export interface RankEvalRankEvalRequestItem {
+  /** The search request’s ID, used to group result details later. */
+  id: Id
+  /** The query being evaluated. */
+  request?: RankEvalRankEvalQuery | QueryDslQueryContainer
+  /** List of document ratings */
+  ratings: RankEvalDocumentRating[]
+  /** The search template ID */
+  template_id?: Id
+  /** The search template parameters. */
+  params?: Record<string, any>
+}
+
+export interface RankEvalRequest extends RequestBase {
+  /** A comma-separated list of data streams, indices, and index aliases used to limit the request.
+    * Wildcard (`*`) expressions are supported.
+    * To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */
+  index?: Indices
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
+  allow_no_indices?: boolean
+  /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */
+  expand_wildcards?: ExpandWildcards
+  /** If `true`, missing or closed indices are not included in the response. */
+  ignore_unavailable?: boolean
+  /** Search operation type */
+  search_type?: string
+  /** A set of typical search requests, together with their provided ratings. */
+  requests: RankEvalRankEvalRequestItem[]
+  /** Definition of the evaluation metric to calculate. */
+  metric?: RankEvalRankEvalMetric
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, search_type?: never, requests?: never, metric?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, search_type?: never, requests?: never, metric?: never }
+}
+
+export interface RankEvalResponse {
+  /** The overall evaluation quality calculated by the defined metric */
+  metric_score: double
+  /** The details section contains one entry for every query in the original requests section, keyed by the search request id */
+  details: Record<Id, RankEvalRankEvalMetricDetail>
+  failures: Record<string, any>
+}
+
+export interface RankEvalUnratedDocument {
+  _id: Id
+  _index: IndexName
+}
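`client.rankEval` scores ranked results against hand-labeled relevance judgments. A sketch using the precision metric, reusing the assumed `client`; the index, query, and ratings are illustrative only:

const evaluation = await client.rankEval({
  index: 'my-index',
  requests: [{
    id: 'hello_query',
    request: { query: { match: { title: 'hello' } } },
    ratings: [
      { _index: 'my-index', _id: '1', rating: 1 }, // relevant
      { _index: 'my-index', _id: '2', rating: 0 }  // irrelevant
    ]
  }],
  metric: { precision: { k: 10, relevant_rating_threshold: 1 } }
})
console.log(evaluation.metric_score) // overall quality
console.log(evaluation.details['hello_query']) // per-query breakdown, incl. unrated_docs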
+
+export interface ReindexDestination {
+  /** The name of the data stream, index, or index alias you are copying to. */
+  index: IndexName
+  /** If it is `create`, the operation will only index documents that do not already exist (also known as "put if absent").
+    *
+    * IMPORTANT: To reindex to a data stream destination, this argument must be `create`. */
+  op_type?: OpType
+  /** The name of the pipeline to use. */
+  pipeline?: string
+  /** By default, a document's routing is preserved unless it's changed by the script.
+    * If it is `keep`, the routing on the bulk request sent for each match is set to the routing on the match.
+    * If it is `discard`, the routing on the bulk request sent for each match is set to `null`.
+    * If it is `=value`, the routing on the bulk request sent for each match is set to the value specified after the equals sign (`=`). */
+  routing?: Routing
+  /** The versioning to use for the indexing operation. */
+  version_type?: VersionType
+}
+
+export interface ReindexRemoteSource {
+  /** The remote connection timeout. */
+  connect_timeout?: Duration
+  /** An object containing the headers of the request. */
+  headers?: Record<string, string>
+  /** The URL for the remote instance of Elasticsearch that you want to index from.
+    * This information is required when you're indexing from remote. */
+  host: Host
+  /** The username to use for authentication with the remote host (required when using basic auth). */
+  username?: Username
+  /** The password to use for authentication with the remote host (required when using basic auth). */
+  password?: Password
+  /** The API key to use for authentication with the remote host (as an alternative to basic auth when the remote cluster is in Elastic Cloud).
+    * (It is not permitted to set this and also to set an `Authorization` header via `headers`.) */
+  api_key?: string
+  /** The remote socket read timeout. */
+  socket_timeout?: Duration
+}
+
+export interface ReindexRequest extends RequestBase {
+  /** If `true`, the request refreshes affected shards to make this operation visible to search. */
+  refresh?: boolean
+  /** The throttle for this request in sub-requests per second.
+    * By default, there is no throttle. */
+  requests_per_second?: float
+  /** The period of time that a consistent view of the index should be maintained for scrolled search. */
+  scroll?: Duration
+  /** The number of slices this task should be divided into.
+    * It defaults to one slice, which means the task isn't sliced into subtasks.
+    *
+    * Reindex supports sliced scroll to parallelize the reindexing process.
+    * This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.
+    *
+    * NOTE: Reindexing from remote clusters does not support manual or automatic slicing.
+    *
+    * If set to `auto`, Elasticsearch chooses the number of slices to use.
+    * This setting will use one slice per shard, up to a certain limit.
+    * If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. */
+  slices?: Slices
+  /** The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards.
+    * By default, Elasticsearch waits for at least one minute before failing.
+    * The actual wait time could be longer, particularly when multiple waits occur. */
+  timeout?: Duration
+  /** The number of shard copies that must be active before proceeding with the operation.
+    * Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+    * The default value is one, which means it waits for each primary shard to be active. */
+  wait_for_active_shards?: WaitForActiveShards
+  /** If `true`, the request blocks until the operation is complete. */
+  wait_for_completion?: boolean
+  /** If `true`, the destination must be an index alias. */
+  require_alias?: boolean
+  /** Indicates whether to continue reindexing even when there are conflicts. */
+  conflicts?: Conflicts
+  /** The destination you are copying to. */
+  dest: ReindexDestination
+  /** The maximum number of documents to reindex.
+    * By default, all documents are reindexed.
+    * If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation.
+    *
+    * If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. */
+  max_docs?: long
+  /** The script to run to update the document source or metadata when reindexing. */
+  script?: Script | ScriptSource
+  /** The source you are copying from. */
+  source: ReindexSource
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { refresh?: never, requests_per_second?: never, scroll?: never, slices?: never, timeout?: never, wait_for_active_shards?: never, wait_for_completion?: never, require_alias?: never, conflicts?: never, dest?: never, max_docs?: never, script?: never, source?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { refresh?: never, requests_per_second?: never, scroll?: never, slices?: never, timeout?: never, wait_for_active_shards?: never, wait_for_completion?: never, require_alias?: never, conflicts?: never, dest?: never, max_docs?: never, script?: never, source?: never }
+}
+
+export interface ReindexResponse {
+  /** The number of scroll responses that were pulled back by the reindex. */
+  batches?: long
+  /** The number of documents that were successfully created. */
+  created?: long
+  /** The number of documents that were successfully deleted. */
+  deleted?: long
+  /** If there were any unrecoverable errors during the process, it is an array of those failures.
+    * If this array is not empty, the request ended because of those failures.
+    * Reindex is implemented using batches and any failure causes the entire process to end but all failures in the current batch are collected into the array.
+    * You can use the `conflicts` option to prevent the reindex from ending on version conflicts. */
+  failures?: BulkIndexByScrollFailure[]
+  /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */
+  noops?: long
+  /** The number of retries attempted by reindex. */
+  retries?: Retries
+  /** The number of requests per second effectively run during the reindex. */
+  requests_per_second?: float
+  slice_id?: integer
+  task?: TaskId
+  /** The number of milliseconds the request slept to conform to `requests_per_second`. */
+  throttled_millis?: EpochTime<UnitMillis>
+  /** This field should always be equal to zero in a reindex response.
+    * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) that a throttled request will be run again in order to conform to `requests_per_second`. */
+  throttled_until_millis?: EpochTime<UnitMillis>
+  /** If any of the requests that ran during the reindex timed out, it is `true`. */
+  timed_out?: boolean
+  /** The total milliseconds the entire operation took. */
+  took?: DurationValue<UnitMillis>
+  /** The number of documents that were successfully processed. */
+  total?: long
+  /** The number of documents that were successfully updated.
+    * That is to say, a document with the same ID already existed before the reindex updated it. */
+  updated?: long
+  /** The number of version conflicts that occurred. */
+  version_conflicts?: long
+}
+
+export interface ReindexSource {
+  /** The name of the data stream, index, or alias you are copying from.
+    * It accepts a comma-separated list to reindex from multiple sources. */
+  index: Indices
+  /** The documents to reindex, which is defined with Query DSL. */
+  query?: QueryDslQueryContainer
+  /** A remote instance of Elasticsearch that you want to index from. */
+  remote?: ReindexRemoteSource
+  /** The number of documents to index per batch.
+    * Use it when you are indexing from remote to ensure that the batches fit within the on-heap buffer, which defaults to a maximum size of 100 MB. */
+  size?: integer
+  /** Slice the reindex request manually using the provided slice ID and total number of slices. */
+  slice?: SlicedScroll
+  /** A comma-separated list of `<field>:<direction>` pairs to sort by before indexing.
+    * Use it in conjunction with `max_docs` to control what documents are reindexed.
+    *
+    * WARNING: Sort in reindex is deprecated.
+    * Sorting in reindex was never guaranteed to index documents in order and prevents further development of reindex such as resilience and performance improvements.
+    * If used in combination with `max_docs`, consider using a query filter instead. */
+  sort?: Sort
+  /** If `true`, reindex all source fields.
+    * Set it to a list to reindex select fields. */
+  _source?: SearchSourceConfig
+  runtime_mappings?: MappingRuntimeFields
+}
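A sketch of `client.reindex` built from `ReindexRequest`; with `wait_for_completion: false` the call returns a task id to poll rather than the final counters. The index names and query are assumptions:

const resp = await client.reindex({
  source: { index: 'old-index', query: { range: { views: { gte: 1 } } } },
  dest: { index: 'new-index', op_type: 'create' }, // 'create' = put-if-absent
  conflicts: 'proceed', // collect version conflicts instead of aborting
  wait_for_completion: false
})
console.log(resp.task) // poll this id via the tasks API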
+
+export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode {
+  tasks: Record<string, ReindexRethrottleReindexTask>
+}
+
+export interface ReindexRethrottleReindexStatus {
+  /** The number of scroll responses pulled back by the reindex. */
+  batches: long
+  /** The number of documents that were successfully created. */
+  created: long
+  /** The number of documents that were successfully deleted. */
+  deleted: long
+  /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */
+  noops: long
+  /** The number of requests per second effectively executed during the reindex. */
+  requests_per_second: float
+  /** The number of retries attempted by reindex. `bulk` is the number of bulk actions retried and `search` is the number of search actions retried. */
+  retries: Retries
+  throttled?: Duration
+  /** Number of milliseconds the request slept to conform to `requests_per_second`. */
+  throttled_millis: DurationValue<UnitMillis>
+  throttled_until?: Duration
+  /** This field should always be equal to zero in a `_reindex` response.
+    * It only has meaning when using the Task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. */
+  throttled_until_millis: DurationValue<UnitMillis>
+  /** The number of documents that were successfully processed. */
+  total: long
+  /** The number of documents that were successfully updated, for example, a document with same ID already existed prior to reindex updating it. */
+  updated: long
+  /** The number of version conflicts that reindex hits. */
+  version_conflicts: long
+}
+
+export interface ReindexRethrottleReindexTask {
+  action: string
+  cancellable: boolean
+  description: string
+  id: long
+  node: Name
+  running_time_in_nanos: DurationValue<UnitNanos>
+  start_time_in_millis: EpochTime<UnitMillis>
+  status: ReindexRethrottleReindexStatus
+  type: string
+  headers: HttpHeaders
+}
+
+export interface ReindexRethrottleRequest extends RequestBase {
+  /** The task identifier, which can be found by using the tasks API. */
+  task_id: Id
+  /** The throttle for this request in sub-requests per second.
+    * It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. */
+  requests_per_second?: float
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { task_id?: never, requests_per_second?: never }
+}
+
+export interface ReindexRethrottleResponse {
+  nodes: Record<string, ReindexRethrottleReindexNode>
+}
+
+export interface RenderSearchTemplateRequest extends RequestBase {
+  /** The ID of the search template to render.
+    * If no `source` is specified, this or the `id` request body parameter is required. */
+  id?: Id
+  file?: string
+  /** Key-value pairs used to replace Mustache variables in the template.
+    * The key is the variable name.
+    * The value is the variable value. */
+  params?: Record<string, any>
+  /** An inline search template.
+    * It supports the same parameters as the search API's request body.
+    * These parameters also support Mustache variables.
+    * If no `id` or `<templated-id>` is specified, this parameter is required. */
+  source?: ScriptSource
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { id?: never, file?: never, params?: never, source?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { id?: never, file?: never, params?: never, source?: never }
+}
+
+export interface RenderSearchTemplateResponse {
+  template_output: Record<string, any>
+}
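`client.renderSearchTemplate` expands Mustache variables without running the search, which is handy for debugging templates. A sketch with an inline `source`, reusing the assumed `client`; the template and params are illustrative:

const rendered = await client.renderSearchTemplate({
  source: '{ "query": { "match": { "title": "{{text}}" } } }',
  params: { text: 'hello' }
})
// template_output holds the fully expanded request body.
console.log(rendered.template_output)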
+
+export type ScriptsPainlessExecutePainlessContext = 'painless_test' | 'filter' | 'score' | 'boolean_field' | 'date_field' | 'double_field' | 'geo_point_field' | 'ip_field' | 'keyword_field' | 'long_field' | 'composite_field'
+
+export interface ScriptsPainlessExecutePainlessContextSetup {
+  /** Document that's temporarily indexed in-memory and accessible from the script. */
+  document: any
+  /** Index containing a mapping that's compatible with the indexed document.
+    * You may specify a remote index by prefixing the index with the remote cluster alias.
+    * For example, `remote1:my_index` indicates that you want to run the painless script against the "my_index" index on the "remote1" cluster.
+    * This request will be forwarded to the "remote1" cluster if you have configured a connection to that remote cluster.
+    *
+    * NOTE: Wildcards are not accepted in the index expression for this endpoint.
+    * The expression `*:myindex` will return the error "No such remote cluster" and the expression `logs*` or `remote1:logs*` will return the error "index not found". */
+  index: IndexName
+  /** Use this parameter to specify a query for computing a score. */
+  query?: QueryDslQueryContainer
+}
+
+export interface ScriptsPainlessExecuteRequest extends RequestBase {
+  /** The context that the script should run in.
+    * NOTE: Result ordering in the field contexts is not guaranteed. */
+  context?: ScriptsPainlessExecutePainlessContext
+  /** Additional parameters for the `context`.
+    * NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. */
+  context_setup?: ScriptsPainlessExecutePainlessContextSetup
+  /** The Painless script to run. */
+  script?: Script | ScriptSource
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { context?: never, context_setup?: never, script?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { context?: never, context_setup?: never, script?: never }
+}
+
+export interface ScriptsPainlessExecuteResponse<TResult = unknown> {
+  result: TResult
+}
+
+export interface ScrollRequest extends RequestBase {
+  /** The scroll ID */
+  scroll_id?: ScrollId
+  /** If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. */
+  rest_total_hits_as_int?: boolean
+  /** The period to retain the search context for scrolling. */
+  scroll?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { scroll_id?: never, rest_total_hits_as_int?: never, scroll?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { scroll_id?: never, rest_total_hits_as_int?: never, scroll?: never }
+}
+
+export type ScrollResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = SearchResponseBody<TDocument, TAggregations>
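`ScrollRequest` and `ScrollResponse` back `client.scroll`, which pages through a search context kept alive by the `scroll` parameter (for new code, a point in time plus `search_after` is generally preferred). A sketch that drains and then clears a scroll, reusing the assumed `client` and `MyDoc`; `clearScroll` is defined elsewhere in this client:

let page = await client.search<MyDoc>({ index: 'my-index', scroll: '1m', size: 1000 })
while (page.hits.hits.length > 0) {
  // ...process page.hits.hits here...
  page = await client.scroll<MyDoc>({ scroll_id: page._scroll_id!, scroll: '1m' })
}
await client.clearScroll({ scroll_id: page._scroll_id! })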
+
+export interface SearchRequest extends RequestBase {
+  /** A comma-separated list of data streams, indices, and aliases to search.
+    * It supports wildcards (`*`).
+    * To search all data streams and indices, omit this parameter or use `*` or `_all`. */
+  index?: Indices
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+    * This behavior applies even if the request targets other open indices.
+    * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
+  allow_no_indices?: boolean
+  /** If `true` and there are shard request timeouts or shard failures, the request returns partial results.
+    * If `false`, it returns an error with no partial results.
+    *
+    * To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. */
+  allow_partial_search_results?: boolean
+  /** The analyzer to use for the query string.
+    * This parameter can be used only when the `q` query string parameter is specified. */
+  analyzer?: string
+  /** If `true`, wildcard and prefix queries are analyzed.
+    * This parameter can be used only when the `q` query string parameter is specified. */
+  analyze_wildcard?: boolean
+  /** The number of shard results that should be reduced at once on the coordinating node.
+    * If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. */
+  batched_reduce_size?: long
+  /** If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. */
+  ccs_minimize_roundtrips?: boolean
+  /** The default operator for the query string query: `and` or `or`.
+    * This parameter can be used only when the `q` query string parameter is specified. */
+  default_operator?: QueryDslOperator
+  /** The field to use as a default when no field prefix is given in the query string.
+    * This parameter can be used only when the `q` query string parameter is specified. */
+  df?: string
+  /** The type of index that wildcard patterns can match.
+    * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+    * It supports comma-separated values such as `open,hidden`. */
+  expand_wildcards?: ExpandWildcards
+  /** If `true`, concrete, expanded or aliased indices will be ignored when frozen. */
+  ignore_throttled?: boolean
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
+  ignore_unavailable?: boolean
+  /** If `true`, the response includes the score contribution from any named queries.
+    *
+    * This functionality reruns each named query on every hit in a search response.
+    * Typically, this adds a small overhead to a request.
+    * However, using computationally expensive named queries on a large number of hits may add significant overhead. */
+  include_named_queries_score?: boolean
+  /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.
+    * This parameter can be used only when the `q` query string parameter is specified. */
+  lenient?: boolean
+  /** The number of concurrent shard requests per node that the search runs concurrently.
+    * This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. */
+  max_concurrent_shard_requests?: integer
+  /** The nodes and shards used for the search.
+    * By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness.
+    * Valid values are:
+    *
+    * * `_only_local` to run the search only on shards on the local node.
+    * * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method.
+    * * `_only_nodes:<node-id>,<node-id>` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method.
+    * * `_prefer_nodes:<node-id>,<node-id>` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method.
+    * * `_shards:<shard>,<shard>` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`.
+   * * `<custom-string>` (any string that does not start with `_`) to route searches with the same `<custom-string>` to the same shards in the same order. */
+  preference?: string
+  /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold.
+   * This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint).
+   * When unspecified, the pre-filter phase is executed if any of these conditions is met:
+   *
+   * * The request targets more than 128 shards.
+   * * The request targets one or more read-only indices.
+   * * The primary sort of the query targets an indexed field. */
+  pre_filter_shard_size?: long
+  /** Specifies a subset of projects to target for the search using project
+   * metadata tags in a subset of Lucene query syntax.
+   * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded).
+   * Examples:
+   * _alias:my-project
+   * _alias:_origin
+   * _alias:*pr*
+   * Supported in serverless only. */
+  project_routing?: ProjectRouting
+  /** If `true`, the caching of search results is enabled for requests where `size` is `0`.
+   * It defaults to index level settings. */
+  request_cache?: boolean
+  /** A custom value that is used to route operations to a specific shard. */
+  routing?: Routing
+  /** The period to retain the search context for scrolling.
+   * By default, this value cannot exceed `1d` (24 hours).
+   * You can change this limit by using the `search.max_keep_alive` cluster-level setting. */
+  scroll?: Duration
+  /** Indicates how distributed term frequencies are calculated for relevance scoring. */
+  search_type?: SearchType
+  /** The field to use for suggestions. */
+  suggest_field?: Field
+  /** The suggest mode.
+   * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */
+  suggest_mode?: SuggestMode
+  /** The number of suggestions to return.
+   * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */
+  suggest_size?: long
+  /** The source text for which the suggestions should be returned.
+   * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */
+  suggest_text?: string
+  /** If `true`, aggregation and suggester names are prefixed by their respective types in the response. */
+  typed_keys?: boolean
+  /** Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. */
+  rest_total_hits_as_int?: boolean
+  /** A comma-separated list of source fields to exclude from the response.
+   * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.
+   * If the `_source` parameter is `false`, this parameter is ignored. */
+  _source_excludes?: Fields
+  /** Whether vectors should be excluded from _source */
+  _source_exclude_vectors?: boolean
+  /** A comma-separated list of source fields to include in the response.
+   * If this parameter is specified, only these source fields are returned.
+   * You can exclude fields from this subset using the `_source_excludes` query parameter.
+   * If the `_source` parameter is `false`, this parameter is ignored. */
+  _source_includes?: Fields
+  /** A query in the Lucene query string syntax.
+   * Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing.
+   *
+   * IMPORTANT: This parameter overrides the query parameter in the request body.
+   * If both parameters are specified, documents matching the query request body parameter are not returned. */
+  q?: string
+  /** Should this request force synthetic _source?
+   * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance.
+   * Fetches with this enabled will be slower than enabling synthetic source natively in the index. */
+  force_synthetic_source?: boolean
+  /** Defines the aggregations that are run as part of the search request. */
+  aggregations?: Record<string, AggregationsAggregationContainer>
+  /** Defines the aggregations that are run as part of the search request.
+   * @alias aggregations */
+  aggs?: Record<string, AggregationsAggregationContainer>
+  /** Collapses search results by the values of the specified field. */
+  collapse?: SearchFieldCollapse
+  /** If `true`, the request returns detailed information about score computation as part of a hit. */
+  explain?: boolean
+  /** Configuration of search extensions defined by Elasticsearch plugins. */
+  ext?: Record<string, any>
+  /** The starting document offset, which must be non-negative.
+   * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+   * To page through more hits, use the `search_after` parameter. */
+  from?: integer
+  /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. */
+  highlight?: SearchHighlight
+  /** Number of hits matching the query to count accurately.
+   * If `true`, the exact number of hits is returned at the cost of some performance.
+   * If `false`, the response does not include the total number of hits matching the query. */
+  track_total_hits?: SearchTrackHits
+  /** Boost the `_score` of documents from specified indices.
+   * The boost value is the factor by which scores are multiplied.
+   * A boost value greater than `1.0` increases the score.
+   * A boost value between `0` and `1.0` decreases the score. */
+  indices_boost?: Partial<Record<IndexName, double>>[]
+  /** An array of wildcard (`*`) field patterns.
+   * The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */
+  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
+  /** The approximate kNN search to run. */
+  knn?: KnnSearch | KnnSearch[]
+  /** The Reciprocal Rank Fusion (RRF) to use.
+   * @remarks This property is not supported on Elastic Cloud Serverless. */
+  rank?: RankContainer
+  /** The minimum `_score` for matching documents.
+   * Documents with a lower `_score` are not included in search results and results collected by aggregations. */
+  min_score?: double
+  /** Use the `post_filter` parameter to filter search results.
+   * The search hits are filtered after the aggregations are calculated.
+   * A post filter has no impact on the aggregation results. */
+  post_filter?: QueryDslQueryContainer
+  /** Set to `true` to return detailed timing information about the execution of individual components in a search request.
+   * NOTE: This is a debugging tool and adds significant overhead to search execution. */
+  profile?: boolean
+  /** The search definition using the Query DSL. */
+  query?: QueryDslQueryContainer
+  /** Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases.
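+   *
+   * @example
+   * // Editor's illustrative sketch (assumes an instantiated `Client` named
+   * // `client`; index and field names are placeholders): rescore the top
+   * // `window_size` hits per shard with a phrase query.
+   * const res = await client.search({
+   *   index: 'my-index',
+   *   query: { match: { content: 'quick brown fox' } },
+   *   rescore: {
+   *     window_size: 50,
+   *     query: {
+   *       rescore_query: { match_phrase: { content: 'quick brown fox' } },
+   *       query_weight: 0.7,
+   *       rescore_query_weight: 1.2
+   *     }
+   *   }
+   * })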
+   */
+  rescore?: SearchRescore | SearchRescore[]
+  /** A retriever is a specification to describe top documents returned from a search.
+   * A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */
+  retriever?: RetrieverContainer
+  /** Retrieve a script evaluation (based on different fields) for each hit. */
+  script_fields?: Record<string, ScriptField>
+  /** Used to retrieve the next page of hits using a set of sort values from the previous page. */
+  search_after?: SortResults
+  /** The number of hits to return, which must not be negative.
+   * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+   * To page through more hits, use the `search_after` property. */
+  size?: integer
+  /** Split a scrolled search into multiple slices that can be consumed independently. */
+  slice?: SlicedScroll
+  /** A comma-separated list of `<field>:<direction>` pairs. */
+  sort?: Sort
+  /** The source fields that are returned for matching documents.
+   * These fields are returned in the `hits._source` property of the search response.
+   * If the `stored_fields` property is specified, the `_source` property defaults to `false`.
+   * Otherwise, it defaults to `true`. */
+  _source?: SearchSourceConfig
+  /** An array of wildcard (`*`) field patterns.
+   * The request returns values for field names matching these patterns in the `hits.fields` property of the response. */
+  fields?: (QueryDslFieldAndFormat | Field)[]
+  /** Defines a suggester that provides similar looking terms based on a provided text. */
+  suggest?: SearchSuggester
+  /** The maximum number of documents to collect for each shard.
+   * If a query reaches this limit, Elasticsearch terminates the query early.
+   * Elasticsearch collects documents before sorting.
+   *
+   * IMPORTANT: Use with caution.
+   * Elasticsearch applies this property to each shard handling the request.
+   * When possible, let Elasticsearch perform early termination automatically.
+   * Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers.
+   *
+   * If set to `0` (default), the query does not terminate early. */
+  terminate_after?: long
+  /** The period of time to wait for a response from each shard.
+   * If no response is received before the timeout expires, the request fails and returns an error.
+   * Defaults to no timeout. */
+  timeout?: string
+  /** If `true`, calculate and return document scores, even if the scores are not used for sorting. */
+  track_scores?: boolean
+  /** If `true`, the request returns the document version as part of a hit. */
+  version?: boolean
+  /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */
+  seq_no_primary_term?: boolean
+  /** A comma-separated list of stored fields to return as part of a hit.
+   * If no fields are specified, no stored fields are included in the response.
+   * If this field is specified, the `_source` property defaults to `false`.
+   * You can pass `_source: true` to return both source fields and stored fields in the search response. */
+  stored_fields?: Fields
+  /** Limit the search to a point in time (PIT).
+   * If you provide a PIT, you cannot specify an `<index>` in the request path. */
+  pit?: SearchPointInTimeReference
+  /** One or more runtime fields in the search request.
+   * These fields take precedence over mapped fields with the same name. */
+  runtime_mappings?: MappingRuntimeFields
+  /** The stats groups to associate with the search.
+   * Each group maintains a statistics aggregation for its associated searches.
+   * You can retrieve these stats using the indices stats API. */
+  stats?: string[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, project_routing?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, project_routing?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_exclude_vectors?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never }
+}
+
+export type SearchResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = SearchResponseBody<TDocument, TAggregations>
+
+export interface SearchResponseBody<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> {
+  /** The number of milliseconds it took Elasticsearch to run the request.
+   * This value is calculated by measuring the time elapsed between receipt of a request on the coordinating node and the time at which the coordinating node is ready to send the response.
+   * It includes:
+   *
+   * * Communication time between the coordinating node and data nodes
+   * * Time the request spends in the search thread pool, queued for execution
+   * * Actual run time
+   *
+   * It does not include:
+   *
+   * * Time needed to send the request to Elasticsearch
+   * * Time needed to serialize the JSON response
+   * * Time needed to send the response to a client */
+  took: long
+  /** If `true`, the request timed out before completion; returned results may be partial or empty. */
+  timed_out: boolean
+  /** A count of shards used for the request. */
+  _shards: ShardStatistics
+  /** The returned documents and metadata. */
+  hits: SearchHitsMetadata<TDocument>
+  aggregations?: TAggregations
+  _clusters?: ClusterStatistics
+  fields?: Record<string, any>
+  max_score?: double
+  num_reduce_phases?: long
+  profile?: SearchProfile
+  pit_id?: Id
+  /** The identifier for the search and its search context.
+   * You can use this scroll ID with the scroll API to retrieve the next batch of search results for the request.
+   * This property is returned only if the `scroll` query parameter is specified in the request. */
+  _scroll_id?: ScrollId
+  suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]>
+  terminated_early?: boolean
+}
+
+export interface SearchAggregationBreakdown {
+  build_aggregation: long
+  build_aggregation_count: long
+  build_leaf_collector: long
+  build_leaf_collector_count: long
+  collect: long
+  collect_count: long
+  initialize: long
+  initialize_count: long
+  post_collection?: long
+  post_collection_count?: long
+  reduce: long
+  reduce_count: long
+}
+
+export interface SearchAggregationProfile {
+  breakdown: SearchAggregationBreakdown
+  description: string
+  time_in_nanos: DurationValue<UnitNanos>
+  type: string
+  debug?: SearchAggregationProfileDebug
+  children?: SearchAggregationProfile[]
+}
+
+export interface SearchAggregationProfileDebug {
+  segments_with_multi_valued_ords?: integer
+  collection_strategy?: string
+  segments_with_single_valued_ords?: integer
+  total_buckets?: integer
+  built_buckets?: integer
+  result_strategy?: string
+  has_filter?: boolean
+  delegate?: string
+  delegate_debug?: SearchAggregationProfileDebug
+  chars_fetched?: integer
+  extract_count?: integer
+  extract_ns?: integer
+  values_fetched?: integer
+  collect_analyzed_ns?: integer
+  collect_analyzed_count?: integer
+  surviving_buckets?: integer
+  ordinals_collectors_used?: integer
+  ordinals_collectors_overhead_too_high?: integer
+  string_hashing_collectors_used?: integer
+  numeric_collectors_used?: integer
+  empty_collectors_used?: integer
+  deferred_aggregators?: string[]
+  segments_with_doc_count_field?: integer
+  segments_with_deleted_docs?: integer
+  filters?: SearchAggregationProfileDelegateDebugFilter[]
+  segments_counted?: integer
+  segments_collected?: integer
+  map_reducer?: string
+  brute_force_used?: integer
+  dynamic_pruning_attempted?: integer
+  dynamic_pruning_used?: integer
+  skipped_due_to_no_data?: integer
+}
+
+export interface SearchAggregationProfileDelegateDebugFilter {
+  results_from_metadata?: integer
+  query?: string
+  specialized_for?: string
+  segments_counted_in_constant_time?: integer
+}
+
+export type SearchBoundaryScanner = 'chars' | 'sentence' | 'word'
+
+export interface SearchCollector {
+  name: string
+  reason: string
+  time_in_nanos: DurationValue<UnitNanos>
+  children?: SearchCollector[]
+}
+
+export interface SearchCompletionContext {
+  /** The factor by which the score of the suggestion should be boosted.
+   * The score is computed by multiplying the boost with the suggestion weight. */
+  boost?: double
+  /** The value of the category to filter/boost on. */
+  context: SearchContext
+  /** An array of precision values at which neighboring geohashes should be taken into account.
+   * Precision value can be a distance value (`5m`, `10km`, etc.) or a raw geohash precision (`1`..`12`).
+   * Defaults to generating neighbors for index time precision level. */
+  neighbours?: GeoHashPrecision[]
+  /** The precision of the geohash to encode the query geo point.
+   * Can be specified as a distance value (`5m`, `10km`, etc.), or as a raw geohash precision (`1`..`12`).
+   * Defaults to index time precision level. */
+  precision?: GeoHashPrecision
+  /** Whether the category value should be treated as a prefix or not. */
+  prefix?: boolean
+}
+
+export interface SearchCompletionSuggest<TDocument = unknown> extends SearchSuggestBase {
+  options: SearchCompletionSuggestOption<TDocument> | SearchCompletionSuggestOption<TDocument>[]
+}
+
+export interface SearchCompletionSuggestOption<TDocument = unknown> {
+  collate_match?: boolean
+  contexts?: Record<string, SearchContext[]>
+  fields?: Record<string, any>
+  _id?: string
+  _index?: IndexName
+  _routing?: Routing
+  _score?: double
+  _source?: TDocument
+  text: string
+  score?: double
+}
+
+export interface SearchCompletionSuggester extends SearchSuggesterBase {
+  /** A value, geo point object, or a geo hash string to filter or boost the suggestion on. */
+  contexts?: Record<Field, SearchCompletionContext | SearchCompletionContext[]>
+  /** Enables fuzziness, meaning you can have a typo in your search and still get results back. */
+  fuzzy?: SearchSuggestFuzziness
+  /** A regex query that expresses a prefix as a regular expression. */
+  regex?: SearchRegexOptions
+  /** Whether duplicate suggestions should be filtered out. */
+  skip_duplicates?: boolean
+}
+
+export type SearchContext = string | GeoLocation
+
+export interface SearchDfsKnnProfile {
+  vector_operations_count?: long
+  query: SearchKnnQueryProfileResult[]
+  rewrite_time: long
+  collector: SearchKnnCollectorResult[]
+}
+
+export interface SearchDfsProfile {
+  statistics?: SearchDfsStatisticsProfile
+  knn?: SearchDfsKnnProfile[]
+}
+
+export interface SearchDfsStatisticsBreakdown {
+  collection_statistics: long
+  collection_statistics_count: long
+  create_weight: long
+  create_weight_count: long
+  rewrite: long
+  rewrite_count: long
+  term_statistics: long
+  term_statistics_count: long
+}
+
+export interface SearchDfsStatisticsProfile {
+  type: string
+  description: string
+  time?: Duration
+  time_in_nanos: DurationValue<UnitNanos>
+  breakdown: SearchDfsStatisticsBreakdown
+  debug?: Record<string, any>
+  children?: SearchDfsStatisticsProfile[]
+}
+
+export interface SearchDirectGenerator {
+  /** The field to fetch the candidate suggestions from.
+   * Needs to be set globally or per suggestion. */
+  field: Field
+  /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion.
+   * Can only be `1` or `2`. */
+  max_edits?: integer
+  /** A factor that is used to multiply with the shard_size in order to inspect more candidate spelling corrections on the shard level.
+   * Can improve accuracy at the cost of performance. */
+  max_inspections?: float
+  /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included.
+   * This can be used to exclude high frequency terms—which are usually spelled correctly—from being spellchecked.
+   * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies.
+   * If a value higher than 1 is specified, the value cannot be fractional. */
+  max_term_freq?: float
+  /** The minimal threshold in number of documents a suggestion should appear in.
+   * This can improve quality by only suggesting high frequency terms.
+   * Can be specified as an absolute number or as a relative percentage of number of documents.
+   * If a value higher than 1 is specified, the number cannot be fractional. */
+  min_doc_freq?: float
+  /** The minimum length a suggest text term must have in order to be included. */
+  min_word_length?: integer
+  /** A filter (analyzer) that is applied to each of the generated tokens before they are passed to the actual phrase scorer. */
+  post_filter?: string
+  /** A filter (analyzer) that is applied to each of the tokens passed to this candidate generator.
+   * This filter is applied to the original token before candidates are generated. */
+  pre_filter?: string
+  /** The number of minimal prefix characters that must match in order to be a candidate suggestion.
+   * Increasing this number improves spellcheck performance. */
+  prefix_length?: integer
+  /** The maximum corrections to be returned per suggest text token. */
+  size?: integer
+  /** Controls what suggestions are included on the suggestions generated on each shard. */
+  suggest_mode?: SuggestMode
+}
+
+export interface SearchFetchProfile {
+  type: string
+  description: string
+  time_in_nanos: DurationValue<UnitNanos>
+  breakdown: SearchFetchProfileBreakdown
+  debug?: SearchFetchProfileDebug
+  children?: SearchFetchProfile[]
+}
+
+export interface SearchFetchProfileBreakdown {
+  load_source?: integer
+  load_source_count?: integer
+  load_stored_fields?: integer
+  load_stored_fields_count?: integer
+  next_reader?: integer
+  next_reader_count?: integer
+  process_count?: integer
+  process?: integer
+}
+
+export interface SearchFetchProfileDebug {
+  stored_fields?: string[]
+  fast_path?: integer
+}
+
+export interface SearchFieldCollapse {
+  /** The field to collapse the result set on */
+  field: Field
+  /** The number of inner hits and their sort order */
+  inner_hits?: SearchInnerHits | SearchInnerHits[]
+  /** The number of concurrent requests allowed to retrieve the inner_hits per group */
+  max_concurrent_group_searches?: integer
+  collapse?: SearchFieldCollapse
+}
+
+export interface SearchFieldSuggester {
+  /** Provides auto-complete/search-as-you-type functionality. */
+  completion?: SearchCompletionSuggester
+  /** Provides access to word alternatives on a per token basis within a certain string distance. */
+  phrase?: SearchPhraseSuggester
+  /** Suggests terms based on edit distance. */
+  term?: SearchTermSuggester
+  /** Prefix used to search for suggestions. */
+  prefix?: string
+  /** A prefix expressed as a regular expression. */
+  regex?: string
+  /** The text to use as input for the suggester.
+   * Needs to be set globally or per suggestion. */
+  text?: string
+}
+
+export interface SearchHighlight extends SearchHighlightBase {
+  encoder?: SearchHighlighterEncoder
+  fields: Partial<Record<Field, SearchHighlightField>> | Partial<Record<Field, SearchHighlightField>>[]
+}
+
+export interface SearchHighlightBase {
+  type?: SearchHighlighterType
+  /** A string that contains each boundary character. */
+  boundary_chars?: string
+  /** How far to scan for boundary characters. */
+  boundary_max_scan?: integer
+  /** Specifies how to break the highlighted fragments: chars, sentence, or word.
+   * Only valid for the unified and fvh highlighters.
+   * Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter.
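+   *
+   * @example
+   * // Editor's illustrative sketch (placeholder index and field names; assumes
+   * // an instantiated `Client` named `client`): sentence-based boundaries with
+   * // the unified highlighter.
+   * const res = await client.search({
+   *   index: 'my-index',
+   *   query: { match: { body: 'elasticsearch' } },
+   *   highlight: { fields: { body: { type: 'unified', boundary_scanner: 'sentence' } } }
+   * })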
+   */
+  boundary_scanner?: SearchBoundaryScanner
+  /** Controls which locale is used to search for sentence and word boundaries.
+   * This parameter takes a form of a language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. */
+  boundary_scanner_locale?: string
+  force_source?: boolean
+  /** Specifies how text should be broken up in highlight snippets: `simple` or `span`.
+   * Only valid for the `plain` highlighter. */
+  fragmenter?: SearchHighlighterFragmenter
+  /** The size of the highlighted fragment in characters. */
+  fragment_size?: integer
+  highlight_filter?: boolean
+  /** Highlight matches for a query other than the search query.
+   * This is especially useful if you use a rescore query because those are not taken into account by highlighting by default. */
+  highlight_query?: QueryDslQueryContainer
+  max_fragment_length?: integer
+  /** If set to a non-negative value, highlighting stops at this defined maximum limit.
+   * The rest of the text is not processed, thus not highlighted and no error is returned.
+   * The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it’s set to a lower value than the query setting. */
+  max_analyzed_offset?: integer
+  /** The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight. */
+  no_match_size?: integer
+  /** The maximum number of fragments to return.
+   * If the number of fragments is set to `0`, no fragments are returned.
+   * Instead, the entire field contents are highlighted and returned.
+   * This can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required.
+   * If `number_of_fragments` is `0`, `fragment_size` is ignored. */
+  number_of_fragments?: integer
+  options?: Record<string, any>
+  /** Sorts highlighted fragments by score when set to `score`.
+   * By default, fragments will be output in the order they appear in the field (order: `none`).
+   * Setting this option to `score` will output the most relevant fragments first.
+   * Each highlighter applies its own logic to compute relevancy scores. */
+  order?: SearchHighlighterOrder
+  /** Controls the number of matching phrases in a document that are considered.
+   * Prevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory.
+   * When using `matched_fields`, `phrase_limit` phrases per matched field are considered. Raising the limit increases query time and consumes more memory.
+   * Only supported by the `fvh` highlighter. */
+  phrase_limit?: integer
+  /** Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text.
+   * By default, highlighted text is wrapped in `<em>` and `</em>` tags. */
+  post_tags?: string[]
+  /** Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text.
+   * By default, highlighted text is wrapped in `<em>` and `</em>` tags. */
+  pre_tags?: string[]
+  /** By default, only fields that contain a query match are highlighted.
+   * Set to `false` to highlight all fields. */
+  require_field_match?: boolean
+  /** Set to `styled` to use the built-in tag schema.
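+   *
+   * @example
+   * // Editor's illustrative sketch (placeholder names; assumes an instantiated
+   * // `Client` named `client`): the `styled` schema emits a predefined set of
+   * // highlight tags instead of the default `<em>` wrapping.
+   * const res = await client.search({
+   *   index: 'my-index',
+   *   query: { match: { title: 'fox' } },
+   *   highlight: { tags_schema: 'styled', fields: { title: {} } }
+   * })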
+   */
+  tags_schema?: SearchHighlighterTagsSchema
+}
+
+export interface SearchHighlightField extends SearchHighlightBase {
+  fragment_offset?: integer
+  matched_fields?: Fields
+}
+
+export type SearchHighlighterEncoder = 'default' | 'html'
+
+export type SearchHighlighterFragmenter = 'simple' | 'span'
+
+export type SearchHighlighterOrder = 'score'
+
+export type SearchHighlighterTagsSchema = 'styled'
+
+export type SearchHighlighterType = 'plain' | 'fvh' | 'unified' | string
+
+export interface SearchHit<TDocument = unknown> {
+  _index: IndexName
+  _id?: Id
+  _score?: double | null
+  _explanation?: ExplainExplanation
+  fields?: Record<string, any>
+  highlight?: Record<string, string[]>
+  inner_hits?: Record<string, SearchInnerHitsResult>
+  matched_queries?: string[] | Record<string, double>
+  _nested?: SearchNestedIdentity
+  _ignored?: string[]
+  ignored_field_values?: Record<string, FieldValue[]>
+  _shard?: string
+  _node?: string
+  _routing?: string
+  _source?: TDocument
+  _rank?: integer
+  _seq_no?: SequenceNumber
+  _primary_term?: long
+  _version?: VersionNumber
+  sort?: SortResults
+}
+
+export interface SearchHitsMetadata<TDocument = unknown> {
+  /** Total hit count information, present only if `track_total_hits` wasn't `false` in the search request. */
+  total?: SearchTotalHits | long
+  hits: SearchHit<TDocument>[]
+  max_score?: double | null
+}
+
+export interface SearchInnerHits {
+  /** The name for the particular inner hit definition in the response.
+   * Useful when a search request contains multiple inner hits. */
+  name?: Name
+  /** The maximum number of hits to return per `inner_hits`. */
+  size?: integer
+  /** Inner hit starting document offset. */
+  from?: integer
+  collapse?: SearchFieldCollapse
+  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
+  explain?: boolean
+  highlight?: SearchHighlight
+  ignore_unmapped?: boolean
+  script_fields?: Record<Field, ScriptField>
+  seq_no_primary_term?: boolean
+  fields?: Field[]
+  /** How the inner hits should be sorted per `inner_hits`.
+   * By default, inner hits are sorted by score. */
+  sort?: Sort
+  _source?: SearchSourceConfig
+  stored_fields?: Fields
+  track_scores?: boolean
+  version?: boolean
+}
+
+export interface SearchInnerHitsResult {
+  hits: SearchHitsMetadata<any>
+}
+
+export interface SearchKnnCollectorResult {
+  name: string
+  reason: string
+  time?: Duration
+  time_in_nanos: DurationValue<UnitNanos>
+  children?: SearchKnnCollectorResult[]
+}
+
+export interface SearchKnnQueryProfileBreakdown {
+  advance: long
+  advance_count: long
+  build_scorer: long
+  build_scorer_count: long
+  compute_max_score: long
+  compute_max_score_count: long
+  count_weight: long
+  count_weight_count: long
+  create_weight: long
+  create_weight_count: long
+  match: long
+  match_count: long
+  next_doc: long
+  next_doc_count: long
+  score: long
+  score_count: long
+  set_min_competitive_score: long
+  set_min_competitive_score_count: long
+  shallow_advance: long
+  shallow_advance_count: long
+}
+
+export interface SearchKnnQueryProfileResult {
+  type: string
+  description: string
+  time?: Duration
+  time_in_nanos: DurationValue<UnitNanos>
+  breakdown: SearchKnnQueryProfileBreakdown
+  debug?: Record<string, any>
+  children?: SearchKnnQueryProfileResult[]
+}
+
+export interface SearchLaplaceSmoothingModel {
+  /** A constant that is added to all counts to balance weights.
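+   *
+   * @example
+   * // Editor's illustrative sketch (placeholder index and field names; assumes
+   * // an instantiated `Client` named `client`): a phrase suggester smoothed
+   * // with Laplace (add-alpha) counting.
+   * const res = await client.search({
+   *   index: 'my-index',
+   *   suggest: {
+   *     my_suggestion: {
+   *       text: 'noble prize',
+   *       phrase: { field: 'title.trigram', smoothing: { laplace: { alpha: 0.7 } } }
+   *     }
+   *   }
+   * })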
+   */
+  alpha: double
+}
+
+export interface SearchLearningToRank {
+  /** The unique identifier of the trained model uploaded to Elasticsearch */
+  model_id: string
+  /** Named parameters to be passed to the query templates used for feature extraction */
+  params?: Record<string, any>
+}
+
+export interface SearchLinearInterpolationSmoothingModel {
+  bigram_lambda: double
+  trigram_lambda: double
+  unigram_lambda: double
+}
+
+export interface SearchNestedIdentity {
+  field: Field
+  offset: integer
+  _nested?: SearchNestedIdentity
+}
+
+export interface SearchPhraseSuggest extends SearchSuggestBase {
+  options: SearchPhraseSuggestOption | SearchPhraseSuggestOption[]
+}
+
+export interface SearchPhraseSuggestCollate {
+  /** Parameters to use if the query is templated. */
+  params?: Record<string, any>
+  /** Returns all suggestions with an extra `collate_match` option indicating whether the generated phrase matched any document. */
+  prune?: boolean
+  /** A collate query that is run once for every suggestion. */
+  query: SearchPhraseSuggestCollateQuery
+}
+
+export interface SearchPhraseSuggestCollateQuery {
+  /** The search template ID. */
+  id?: Id
+  /** The query source. */
+  source?: ScriptSource
+}
+
+export interface SearchPhraseSuggestHighlight {
+  /** Use in conjunction with `pre_tag` to define the HTML tags to use for the highlighted text. */
+  post_tag: string
+  /** Use in conjunction with `post_tag` to define the HTML tags to use for the highlighted text. */
+  pre_tag: string
+}
+
+export interface SearchPhraseSuggestOption {
+  text: string
+  score: double
+  highlighted?: string
+  collate_match?: boolean
+}
+
+export interface SearchPhraseSuggester extends SearchSuggesterBase {
+  /** Checks each suggestion against the specified query to prune suggestions for which no matching docs exist in the index. */
+  collate?: SearchPhraseSuggestCollate
+  /** Defines a factor applied to the input phrase score, which is used as a threshold for other suggest candidates.
+   * Only candidates that score higher than the threshold will be included in the result. */
+  confidence?: double
+  /** A list of candidate generators that produce a list of possible terms per term in the given text. */
+  direct_generator?: SearchDirectGenerator[]
+  force_unigrams?: boolean
+  /** Sets max size of the n-grams (shingles) in the field.
+   * If the field doesn’t contain n-grams (shingles), this should be omitted or set to `1`.
+   * If the field uses a shingle filter, the `gram_size` is set to the `max_shingle_size` if not explicitly set. */
+  gram_size?: integer
+  /** Sets up suggestion highlighting.
+   * If not provided, no highlighted field is returned. */
+  highlight?: SearchPhraseSuggestHighlight
+  /** The maximum percentage of the terms considered to be misspellings in order to form a correction.
+   * This method accepts a float value in the range `[0..1)` as a fraction of the actual query terms or a number `>=1` as an absolute number of query terms. */
+  max_errors?: double
+  /** The likelihood of a term being misspelled even if the term exists in the dictionary. */
+  real_word_error_likelihood?: double
+  /** The separator that is used to separate terms in the bigram field.
+   * If not set, the whitespace character is used as a separator. */
+  separator?: string
+  /** Sets the maximum number of suggested terms to be retrieved from each individual shard.
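+   *
+   * @example
+   * // Editor's illustrative sketch (placeholder names; assumes an instantiated
+   * // `Client` named `client`): widen the per-shard candidate pool and add a
+   * // direct generator for candidate terms.
+   * const res = await client.search({
+   *   index: 'my-index',
+   *   suggest: {
+   *     fix_typos: {
+   *       text: 'noble prize',
+   *       phrase: {
+   *         field: 'title.trigram',
+   *         shard_size: 100,
+   *         direct_generator: [{ field: 'title.trigram', suggest_mode: 'always' }]
+   *       }
+   *     }
+   *   }
+   * })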
+   */
+  shard_size?: integer
+  /** The smoothing model used to balance weight between infrequent grams (grams (shingles) that do not exist in the index) and frequent grams (appear at least once in the index).
+   * The default model is Stupid Backoff. */
+  smoothing?: SearchSmoothingModelContainer
+  /** The text/query to provide suggestions for. */
+  text?: string
+  token_limit?: integer
+}
+
+export interface SearchPointInTimeReference {
+  id: Id
+  keep_alive?: Duration
+}
+
+export interface SearchProfile {
+  shards: SearchShardProfile[]
+}
+
+export interface SearchQueryBreakdown {
+  advance: long
+  advance_count: long
+  build_scorer: long
+  build_scorer_count: long
+  create_weight: long
+  create_weight_count: long
+  match: long
+  match_count: long
+  shallow_advance: long
+  shallow_advance_count: long
+  next_doc: long
+  next_doc_count: long
+  score: long
+  score_count: long
+  compute_max_score: long
+  compute_max_score_count: long
+  count_weight: long
+  count_weight_count: long
+  set_min_competitive_score: long
+  set_min_competitive_score_count: long
+}
+
+export interface SearchQueryProfile {
+  breakdown: SearchQueryBreakdown
+  description: string
+  time_in_nanos: DurationValue<UnitNanos>
+  type: string
+  children?: SearchQueryProfile[]
+}
+
+export interface SearchRegexOptions {
+  /** Optional operators for the regular expression. */
+  flags?: integer | string
+  /** Maximum number of automaton states required for the query. */
+  max_determinized_states?: integer
+}
+
+export interface SearchRescore {
+  window_size?: integer
+  query?: SearchRescoreQuery
+  learning_to_rank?: SearchLearningToRank
+  script?: SearchScriptRescore
+}
+
+export interface SearchRescoreQuery {
+  /** The query to use for rescoring.
+   * This query is only run on the Top-K results returned by the `query` and `post_filter` phases. */
+  rescore_query: QueryDslQueryContainer
+  /** Relative importance of the original query versus the rescore query. */
+  query_weight?: double
+  /** Relative importance of the rescore query versus the original query. */
+  rescore_query_weight?: double
+  /** Determines how scores are combined. */
+  score_mode?: SearchScoreMode
+}
+
+export type SearchScoreMode = 'avg' | 'max' | 'min' | 'multiply' | 'total'
+
+export interface SearchScriptRescore {
+  script: Script | ScriptSource
+}
+
+export interface SearchSearchProfile {
+  collector: SearchCollector[]
+  query: SearchQueryProfile[]
+  rewrite_time: long
+}
+
+export interface SearchSearchRequestBody {
+  /** Defines the aggregations that are run as part of the search request. */
+  aggregations?: Record<string, AggregationsAggregationContainer>
+  /** Defines the aggregations that are run as part of the search request.
+   * @alias aggregations */
+  aggs?: Record<string, AggregationsAggregationContainer>
+  /** Collapses search results by the values of the specified field. */
+  collapse?: SearchFieldCollapse
+  /** If `true`, the request returns detailed information about score computation as part of a hit. */
+  explain?: boolean
+  /** Configuration of search extensions defined by Elasticsearch plugins. */
+  ext?: Record<string, any>
+  /** The starting document offset, which must be non-negative.
+   * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+   * To page through more hits, use the `search_after` parameter. */
+  from?: integer
+  /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. */
+  highlight?: SearchHighlight
+  /** Number of hits matching the query to count accurately.
+   * If `true`, the exact number of hits is returned at the cost of some performance.
+   * If `false`, the response does not include the total number of hits matching the query. */
+  track_total_hits?: SearchTrackHits
+  /** Boost the `_score` of documents from specified indices.
+   * The boost value is the factor by which scores are multiplied.
+   * A boost value greater than `1.0` increases the score.
+   * A boost value between `0` and `1.0` decreases the score. */
+  indices_boost?: Partial<Record<IndexName, double>>[]
+  /** An array of wildcard (`*`) field patterns.
+   * The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */
+  docvalue_fields?: (QueryDslFieldAndFormat | Field)[]
+  /** The approximate kNN search to run. */
+  knn?: KnnSearch | KnnSearch[]
+  /** The Reciprocal Rank Fusion (RRF) to use.
+   * @remarks This property is not supported on Elastic Cloud Serverless. */
+  rank?: RankContainer
+  /** The minimum `_score` for matching documents.
+   * Documents with a lower `_score` are not included in search results or results collected by aggregations. */
+  min_score?: double
+  /** Use the `post_filter` parameter to filter search results.
+   * The search hits are filtered after the aggregations are calculated.
+   * A post filter has no impact on the aggregation results. */
+  post_filter?: QueryDslQueryContainer
+  /** Set to `true` to return detailed timing information about the execution of individual components in a search request.
+   * NOTE: This is a debugging tool and adds significant overhead to search execution. */
+  profile?: boolean
+  /** The search definition using the Query DSL. */
+  query?: QueryDslQueryContainer
+  /** Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. */
+  rescore?: SearchRescore | SearchRescore[]
+  /** A retriever is a specification to describe top documents returned from a search.
+   * A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */
+  retriever?: RetrieverContainer
+  /** Retrieve a script evaluation (based on different fields) for each hit. */
+  script_fields?: Record<string, ScriptField>
+  /** Used to retrieve the next page of hits using a set of sort values from the previous page. */
+  search_after?: SortResults
+  /** The number of hits to return, which must not be negative.
+   * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters.
+   * To page through more hits, use the `search_after` property. */
+  size?: integer
+  /** Split a scrolled search into multiple slices that can be consumed independently. */
+  slice?: SlicedScroll
+  /** A comma-separated list of `<field>:<direction>` pairs. */
+  sort?: Sort
+  /** The source fields that are returned for matching documents.
+   * These fields are returned in the `hits._source` property of the search response.
+   * If the `stored_fields` property is specified, the `_source` property defaults to `false`.
+   * Otherwise, it defaults to `true`. */
+  _source?: SearchSourceConfig
+  /** An array of wildcard (`*`) field patterns.
+   * The request returns values for field names matching these patterns in the `hits.fields` property of the response. */
+  fields?: (QueryDslFieldAndFormat | Field)[]
+  /** Defines a suggester that provides similar looking terms based on a provided text. */
+  suggest?: SearchSuggester
+  /** The maximum number of documents to collect for each shard.
+   * If a query reaches this limit, Elasticsearch terminates the query early.
+   * Elasticsearch collects documents before sorting.
+   *
+   * IMPORTANT: Use with caution.
+   * Elasticsearch applies this property to each shard handling the request.
+   * When possible, let Elasticsearch perform early termination automatically.
+   * Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers.
+   *
+   * If set to `0` (default), the query does not terminate early. */
+  terminate_after?: long
+  /** The period of time to wait for a response from each shard.
+   * If no response is received before the timeout expires, the request fails and returns an error.
+   * Defaults to no timeout. */
+  timeout?: string
+  /** If `true`, calculate and return document scores, even if the scores are not used for sorting. */
+  track_scores?: boolean
+  /** If `true`, the request returns the document version as part of a hit. */
+  version?: boolean
+  /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */
+  seq_no_primary_term?: boolean
+  /** A comma-separated list of stored fields to return as part of a hit.
+   * If no fields are specified, no stored fields are included in the response.
+   * If this field is specified, the `_source` property defaults to `false`.
+   * You can pass `_source: true` to return both source fields and stored fields in the search response. */
+  stored_fields?: Fields
+  /** Limit the search to a point in time (PIT).
+   * If you provide a PIT, you cannot specify an `<index>` in the request path. */
+  pit?: SearchPointInTimeReference
+  /** One or more runtime fields in the search request.
+   * These fields take precedence over mapped fields with the same name. */
+  runtime_mappings?: MappingRuntimeFields
+  /** The stats groups to associate with the search.
+   * Each group maintains a statistics aggregation for its associated searches.
+   * You can retrieve these stats using the indices stats API. */
+  stats?: string[]
+}
+
+export interface SearchShardProfile {
+  aggregations: SearchAggregationProfile[]
+  cluster: string
+  dfs?: SearchDfsProfile
+  fetch?: SearchFetchProfile
+  id: string
+  index: IndexName
+  node_id: NodeId
+  searches: SearchSearchProfile[]
+  shard_id: integer
+}
+
+export interface SearchSmoothingModelContainer {
+  /** A smoothing model that uses an additive smoothing where a constant (typically `1.0` or smaller) is added to all counts to balance weights. */
+  laplace?: SearchLaplaceSmoothingModel
+  /** A smoothing model that takes the weighted mean of the unigrams, bigrams, and trigrams based on user supplied weights (lambdas). */
+  linear_interpolation?: SearchLinearInterpolationSmoothingModel
+  /** A simple backoff model that backs off to lower order n-gram models if the higher order count is `0` and discounts the lower order n-gram model by a constant factor. */
+  stupid_backoff?: SearchStupidBackoffSmoothingModel
+}
+
+export type SearchSourceConfig = boolean | SearchSourceFilter | Fields
+
+export type SearchSourceConfigParam = boolean | Fields
+
+export interface SearchSourceFilter {
+  /** If `true`, vector fields are excluded from the returned source.
+   *
+   * This option takes precedence over `includes`: any vector field will
+   * remain excluded even if it matches an `includes` rule. */
+  exclude_vectors?: boolean
+  /** A list of fields to exclude from the returned source. */
+  excludes?: Fields
+  /** A list of fields to exclude from the returned source.
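+   *
+   * @example
+   * // Editor's illustrative sketch (placeholder field names; assumes an
+   * // instantiated `Client` named `client`): return only selected source fields.
+   * const res = await client.search({
+   *   index: 'my-index',
+   *   query: { match_all: {} },
+   *   _source: { includes: ['title', 'user.*'], excludes: ['*.internal'] }
+   * })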
+   * @alias excludes */
+  exclude?: Fields
+  /** A list of fields to include in the returned source. */
+  includes?: Fields
+  /** A list of fields to include in the returned source.
+   * @alias includes */
+  include?: Fields
+}
+
+export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram'
+
+export interface SearchStupidBackoffSmoothingModel {
+  /** A constant factor that the lower order n-gram model is discounted by. */
+  discount: double
+}
+
+export type SearchSuggest<TDocument = unknown> = SearchCompletionSuggest<TDocument> | SearchPhraseSuggest | SearchTermSuggest
+
+export interface SearchSuggestBase {
+  length: integer
+  offset: integer
+  text: string
+}
+
+export interface SearchSuggestFuzziness {
+  /** The fuzziness factor. */
+  fuzziness?: Fuzziness
+  /** Minimum length of the input before fuzzy suggestions are returned. */
+  min_length?: integer
+  /** Minimum length of the input, which is not checked for fuzzy alternatives. */
+  prefix_length?: integer
+  /** If set to `true`, transpositions are counted as one change instead of two. */
+  transpositions?: boolean
+  /** If `true`, all measurements (like fuzzy edit distance, transpositions, and lengths) are measured in Unicode code points instead of in bytes.
+   * This is slightly slower than raw bytes. */
+  unicode_aware?: boolean
+}
+
+export type SearchSuggestSort = 'score' | 'frequency'
+
+export interface SearchSuggesterKeys {
+  /** Global suggest text, to avoid repetition when the same text is used in several suggesters */
+  text?: string
+}
+export type SearchSuggester = SearchSuggesterKeys
+& { [property: string]: SearchFieldSuggester | string }
+
+export interface SearchSuggesterBase {
+  /** The field to fetch the candidate suggestions from.
+   * Needs to be set globally or per suggestion. */
+  field: Field
+  /** The analyzer to analyze the suggest text with.
+   * Defaults to the search analyzer of the suggest field. */
+  analyzer?: string
+  /** The maximum corrections to be returned per suggest text token. */
+  size?: integer
+}
+
+export interface SearchTermSuggest extends SearchSuggestBase {
+  options: SearchTermSuggestOption | SearchTermSuggestOption[]
+}
+
+export interface SearchTermSuggestOption {
+  text: string
+  score: double
+  freq: long
+  highlighted?: string
+  collate_match?: boolean
+}
+
+export interface SearchTermSuggester extends SearchSuggesterBase {
+  lowercase_terms?: boolean
+  /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion.
+   * Can only be `1` or `2`. */
+  max_edits?: integer
+  /** A factor that is used to multiply with the shard_size in order to inspect more candidate spelling corrections on the shard level.
+   * Can improve accuracy at the cost of performance. */
+  max_inspections?: integer
+  /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included.
+   * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies.
+   * If a value higher than 1 is specified, then fractional can not be specified. */
+  max_term_freq?: float
+  /** The minimal threshold in number of documents a suggestion should appear in.
+   * This can improve quality by only suggesting high frequency terms.
+   * Can be specified as an absolute number or as a relative percentage of number of documents.
+   * If a value higher than 1 is specified, then the number cannot be fractional.
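+   *
+   * @example
+   * // Editor's illustrative sketch (placeholder names; assumes an instantiated
+   * // `Client` named `client`): a term suggester that only proposes reasonably
+   * // frequent terms.
+   * const res = await client.search({
+   *   index: 'my-index',
+   *   suggest: {
+   *     spelling: {
+   *       text: 'elasticsaerch',
+   *       term: { field: 'body', min_doc_freq: 2, suggest_mode: 'popular' }
+   *     }
+   *   }
+   * })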
+   */
+  min_doc_freq?: float
+  /** The minimum length a suggest text term must have in order to be included. */
+  min_word_length?: integer
+  /** The number of minimal prefix characters that must match in order to be a candidate for suggestions.
+   * Increasing this number improves spellcheck performance. */
+  prefix_length?: integer
+  /** Sets the maximum number of suggestions to be retrieved from each individual shard. */
+  shard_size?: integer
+  /** Defines how suggestions should be sorted per suggest text term. */
+  sort?: SearchSuggestSort
+  /** The string distance implementation to use for comparing how similar suggested terms are. */
+  string_distance?: SearchStringDistance
+  /** Controls what suggestions are included or controls for what suggest text terms, suggestions should be suggested. */
+  suggest_mode?: SuggestMode
+  /** The suggest text.
+   * Needs to be set globally or per suggestion. */
+  text?: string
+}
+
+export interface SearchTotalHits {
+  relation: SearchTotalHitsRelation
+  value: long
+}
+
+export type SearchTotalHitsRelation = 'eq' | 'gte'
+
+export type SearchTrackHits = boolean | integer
+
+export interface SearchMvtRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, or aliases to search */
+  index: Indices
+  /** Field containing geospatial data to return */
+  field: Field
+  /** Zoom level for the vector tile to search */
+  zoom: SearchMvtZoomLevel
+  /** X coordinate for the vector tile to search */
+  x: SearchMvtCoordinate
+  /** Y coordinate for the vector tile to search */
+  y: SearchMvtCoordinate
+  /** Specifies a subset of projects to target for the search using project
+   * metadata tags in a subset of Lucene query syntax.
+   * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded).
+   * Examples:
+   * _alias:my-project
+   * _alias:_origin
+   * _alias:*pr*
+   * Supported in serverless only. */
+  project_routing?: ProjectRouting
+  /** Sub-aggregations for the geotile_grid.
+   *
+   * It supports the following aggregation types:
+   *
+   * - `avg`
+   * - `boxplot`
+   * - `cardinality`
+   * - `extended stats`
+   * - `max`
+   * - `median absolute deviation`
+   * - `min`
+   * - `percentile`
+   * - `percentile-rank`
+   * - `stats`
+   * - `sum`
+   * - `value count`
+   *
+   * The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. */
+  aggs?: Record<string, AggregationsAggregationContainer>
+  /** The size, in pixels, of a clipping buffer outside the tile. This allows renderers
+   * to avoid outline artifacts from geometries that extend past the extent of the tile. */
+  buffer?: integer
+  /** If `false`, the meta layer's feature is the bounding box of the tile.
+   * If `true`, the meta layer's feature is a bounding box resulting from a
+   * `geo_bounds` aggregation. The aggregation runs on values that intersect
+   * the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting
+   * bounding box may be larger than the vector tile. */
+  exact_bounds?: boolean
+  /** The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. */
+  extent?: integer
+  /** The fields to return in the `hits` layer.
+   * It supports wildcards (`*`).
+   * This parameter does not support fields with array values. Fields with array
+   * values may return inconsistent results. */
+  fields?: Fields
+  /** The aggregation used to create a grid for the `field`. */
+  grid_agg?: SearchMvtGridAggregationType
+  /** Additional zoom levels available through the aggs layer.
+   * For example, if `<zoom>` is `7`
+   * and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results
+   * don't include the aggs layer. */
+  grid_precision?: integer
+  /** Determines the geometry type for features in the aggs layer. In the aggs layer,
+   * each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon
+   * of the cell's bounding box. If `point`, each feature is a Point that is the centroid
+   * of the cell. */
+  grid_type?: SearchMvtGridType
+  /** The query DSL used to filter documents for the search. */
+  query?: QueryDslQueryContainer
+  /** Defines one or more runtime fields in the search request. These fields take
+   * precedence over mapped fields with the same name. */
+  runtime_mappings?: MappingRuntimeFields
+  /** The maximum number of features to return in the hits layer. Accepts 0-10000.
+   * If 0, results don't include the hits layer. */
+  size?: integer
+  /** Sort the features in the hits layer. By default, the API calculates a bounding
+   * box for each feature. It sorts features based on this box's diagonal length,
+   * from longest to shortest. */
+  sort?: Sort
+  /** The number of hits matching the query to count accurately. If `true`, the exact number
+   * of hits is returned at the cost of some performance. If `false`, the response does
+   * not include the total number of hits matching the query. */
+  track_total_hits?: SearchTrackHits
+  /** If `true`, the hits and aggs layers will contain additional point features representing
+   * suggested label positions for the original features.
+   *
+   * * `Point` and `MultiPoint` features will have one of the points selected.
+   * * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree.
+   * * `LineString` features will likewise provide a roughly central point selected from the triangle-tree.
+   * * The aggregation results will provide one central point for each aggregation bucket.
+   *
+   * All attributes from the original features will also be copied to the new label features.
+   * In addition, the new features will be distinguishable using the tag `_mvt_label_position`. */
+  with_labels?: boolean
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, project_routing?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never }
+  /** All values in `querystring` will be added to the request querystring.
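+   *
+   * @example
+   * // Editor's illustrative sketch (placeholder index and field names; assumes
+   * // an instantiated `Client` named `client`): fetch one vector tile of geo
+   * // points; the response body is a binary Mapbox vector tile.
+   * const tile = await client.searchMvt({ index: 'museums', field: 'location', zoom: 13, x: 4207, y: 2692 })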
+   */
+  querystring?: { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, project_routing?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never }
+}
+
+export type SearchMvtResponse = MapboxVectorTiles
+
+export type SearchMvtCoordinate = integer
+
+export type SearchMvtGridAggregationType = 'geotile' | 'geohex'
+
+export type SearchMvtGridType = 'grid' | 'point' | 'centroid'
+
+export type SearchMvtZoomLevel = integer
+
+export interface SearchShardsRequest extends RequestBase {
+  /** A comma-separated list of data streams, indices, and aliases to search.
+   * It supports wildcards (`*`).
+   * To search all data streams and indices, omit this parameter or use `*` or `_all`. */
+  index?: Indices
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+   * This behavior applies even if the request targets other open indices.
+   * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
+  allow_no_indices?: boolean
+  /** Type of index that wildcard patterns can match.
+   * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+   * Supports comma-separated values, such as `open,hidden`. */
+  expand_wildcards?: ExpandWildcards
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
+  ignore_unavailable?: boolean
+  /** If `true`, the request retrieves information from the local node only. */
+  local?: boolean
+  /** The period to wait for a connection to the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
+  master_timeout?: Duration
+  /** The node or shard the operation should be performed on.
+   * It is random by default. */
+  preference?: string
+  /** A custom value used to route operations to a specific shard. */
+  routing?: Routing
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, preference?: never, routing?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, preference?: never, routing?: never }
+}
+
+export interface SearchShardsResponse {
+  nodes: Record<NodeId, SearchShardsSearchShardsNodeAttributes>
+  shards: NodeShard[][]
+  indices: Record<IndexName, SearchShardsShardStoreIndex>
+}
+
+export interface SearchShardsSearchShardsNodeAttributes {
+  /** The human-readable identifier of the node. */
+  name: NodeName
+  /** The ephemeral ID of the node. */
+  ephemeral_id: Id
+  /** The host and port where transport HTTP connections are accepted. */
+  transport_address: TransportAddress
+  external_id: string
+  /** Lists node attributes.
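+   *
+   * @example
+   * // Editor's illustrative sketch (placeholder index name; assumes an
+   * // instantiated `Client` named `client`): report the nodes and shards a
+   * // search would touch.
+   * const res = await client.searchShards({ index: 'my-index' })
+   * for (const [nodeId, node] of Object.entries(res.nodes)) {
+   *   console.log(nodeId, node.name, node.transport_address)
+   * }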
*/ + attributes: Record + roles: NodeRoles + version: VersionString + min_index_version: integer + max_index_version: integer +} + +export interface SearchShardsShardStoreIndex { + aliases?: Name[] + filter?: QueryDslQueryContainer +} + +export interface SearchTemplateRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** If `true`, network round-trips are minimized for cross-cluster search requests. */ + ccs_minimize_roundtrips?: boolean + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. */ + ignore_throttled?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** The node or shard the operation should be performed on. + * It is random by default. */ + preference?: string + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** Specifies how long a consistent view of the index + * should be maintained for scrolled search. */ + scroll?: Duration + /** The type of the search operation. */ + search_type?: SearchType + /** If `true`, `hits.total` is rendered as an integer in the response. + * If `false`, it is rendered as an object. */ + rest_total_hits_as_int?: boolean + /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ + typed_keys?: boolean + /** If `true`, returns detailed information about score calculation as part of each hit. + * If you specify both this and the `explain` query parameter, the API uses only the query parameter. */ + explain?: boolean + /** The ID of the search template to use. If no `source` is specified, + * this parameter is required. */ + id?: Id + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ + params?: Record + /** If `true`, the query execution is profiled. */ + profile?: boolean + /** An inline search template. Supports the same parameters as the search API's + * request body. It also supports Mustache variables. If no `id` is specified, this + * parameter is required. */ + source?: ScriptSource + /** All values in `body` will be added to the request body. 
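+ *
+ * A usage sketch (illustrative; the template id and params are hypothetical):
+ *
+ * @example
+ * const req: SearchTemplateRequest = {
+ *   index: 'my-index',
+ *   id: 'my-search-template',    // a previously stored template
+ *   params: {                    // Mustache variables in the template
+ *     query_string: 'hello world',
+ *     from: 0,
+ *     size: 10
+ *   }
+ * }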
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, project_routing?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, project_routing?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } +} + +export interface SearchTemplateResponse<TDocument = unknown> { + took: long + timed_out: boolean + _shards: ShardStatistics + hits: SearchHitsMetadata<TDocument> + aggregations?: Record<AggregateName, AggregationsAggregate> + _clusters?: ClusterStatistics + fields?: Record<string, any> + max_score?: double + num_reduce_phases?: long + profile?: SearchProfile + pit_id?: Id + _scroll_id?: ScrollId + suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]> + terminated_early?: boolean +} + +export interface TermsEnumRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and index aliases to search. + * Wildcard (`*`) expressions are supported. + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + index: Indices + /** The name of the field for which to return indexed terms. */ + field: Field + /** The number of matching terms to return. */ + size?: integer + /** The maximum length of time to spend collecting results. + * If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty. */ + timeout?: Duration + /** When `true`, the provided search string is matched against index terms without case sensitivity. */ + case_insensitive?: boolean + /** Filter an index shard if the provided query rewrites to `match_none`. */ + index_filter?: QueryDslQueryContainer + /** The string to match at the start of indexed terms. + * If it is not provided, all terms in the field are considered. + * + * > info + * > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. */ + string?: string + /** The string after which terms in the index should be returned. + * It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. */ + search_after?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, field?: never, size?: never, timeout?: never, case_insensitive?: never, index_filter?: never, string?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, field?: never, size?: never, timeout?: never, case_insensitive?: never, index_filter?: never, string?: never, search_after?: never } +} + +export interface TermsEnumResponse { + _shards: ShardStatistics + terms: string[] + /** If `false`, the returned terms set may be incomplete and should be treated as approximate.
+ * This can occur due to a few reasons, such as a request timeout or a node error. */ + complete: boolean +} + +export interface TermvectorsFieldStatistics { + doc_count: integer + sum_doc_freq: long + sum_ttf: long +} + +export interface TermvectorsFilter { + /** Ignore words which occur in more than this many docs. + * Defaults to unbounded. */ + max_doc_freq?: integer + /** The maximum number of terms that must be returned per field. */ + max_num_terms?: integer + /** Ignore words with more than this frequency in the source doc. + * It defaults to unbounded. */ + max_term_freq?: integer + /** The maximum word length above which words will be ignored. + * Defaults to unbounded. */ + max_word_length?: integer + /** Ignore terms which do not occur in at least this many docs. */ + min_doc_freq?: integer + /** Ignore words with less than this frequency in the source doc. */ + min_term_freq?: integer + /** The minimum word length below which words will be ignored. */ + min_word_length?: integer +} + +export interface TermvectorsRequest<TDocument = unknown> extends RequestBase { + /** The name of the index that contains the document. */ + index: IndexName + /** A unique identifier for the document. */ + id?: Id + /** The node or shard the operation should be performed on. + * It is random by default. */ + preference?: string + /** If true, the request is real-time as opposed to near-real-time. */ + realtime?: boolean + /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ + doc?: TDocument + /** Filter terms based on their tf-idf scores. + * This could be useful in order to find out a good characteristic vector of a document. + * This feature works in a similar manner to the second phase of the More Like This Query. */ + filter?: TermvectorsFilter + /** Override the default per-field analyzer. + * This is useful in order to generate term vectors in any fashion, especially when using artificial documents. + * When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ + per_field_analyzer?: Record<Field, string> + /** A list of fields to include in the statistics. + * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ + fields?: Field[] + /** If `true`, the response includes: + * + * * The document count (how many documents contain this field). + * * The sum of document frequencies (the sum of document frequencies for all terms in this field). + * * The sum of total term frequencies (the sum of total term frequencies of each term in this field). */ + field_statistics?: boolean + /** If `true`, the response includes term offsets. */ + offsets?: boolean + /** If `true`, the response includes term payloads. */ + payloads?: boolean + /** If `true`, the response includes term positions. */ + positions?: boolean + /** If `true`, the response includes: + * + * * The total term frequency (how often a term occurs in all documents). + * * The document frequency (the number of documents containing the current term). + * + * By default these values are not returned since term statistics can have a serious performance impact. */ + term_statistics?: boolean + /** A custom value that is used to route operations to a specific shard. */ + routing?: Routing + /** If `true`, returns the document version as part of a hit. */ + version?: VersionNumber + /** The version type.
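+ *
+ * A request sketch for the interface above (illustrative; names are hypothetical):
+ *
+ * @example
+ * const req: TermvectorsRequest = {
+ *   index: 'my-index',
+ *   doc: { text: 'twitter test test test' },   // artificial document
+ *   per_field_analyzer: { text: 'keyword' },   // override the field's analyzer
+ *   filter: { max_num_terms: 3, min_term_freq: 1, min_doc_freq: 1 }
+ * }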
*/ + version_type?: VersionType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, id?: never, preference?: never, realtime?: never, doc?: never, filter?: never, per_field_analyzer?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, term_statistics?: never, routing?: never, version?: never, version_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, id?: never, preference?: never, realtime?: never, doc?: never, filter?: never, per_field_analyzer?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, term_statistics?: never, routing?: never, version?: never, version_type?: never } +} + +export interface TermvectorsResponse { + found: boolean + _id?: Id + _index: IndexName + term_vectors?: Record<Field, TermvectorsTermVector> + took: long + _version: VersionNumber +} + +export interface TermvectorsTerm { + doc_freq?: integer + score?: double + term_freq: integer + tokens?: TermvectorsToken[] + ttf?: integer +} + +export interface TermvectorsTermVector { + field_statistics?: TermvectorsFieldStatistics + terms: Record<string, TermvectorsTerm> +} + +export interface TermvectorsToken { + end_offset?: integer + payload?: string + position: integer + start_offset?: integer +} + +export interface UpdateRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase { + /** A unique identifier for the document to be updated. */ + id: Id + /** The name of the target index. + * By default, the index is created automatically if it doesn't exist. */ + index: IndexName + /** Only perform the operation if the document has this primary term. */ + if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ + if_seq_no?: SequenceNumber + /** If `true`, the document source is included in the error message in case of parsing errors. */ + include_source_on_error?: boolean + /** The script language. */ + lang?: string + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', it does nothing with refreshes. */ + refresh?: Refresh + /** If `true`, the destination must be an index alias. */ + require_alias?: boolean + /** The number of times the operation should be retried when a conflict occurs. */ + retry_on_conflict?: integer + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** The period to wait for the following operations: dynamic mapping updates and waiting for active shards. + * Elasticsearch waits for at least the timeout period before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ + timeout?: Duration + /** The number of copies of each shard that must be active before proceeding with the operation. + * Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). + * The default value of `1` means it waits for each primary shard to be active. */ + wait_for_active_shards?: WaitForActiveShards + /** The source fields you want to exclude. */ + _source_excludes?: Fields + /** The source fields you want to retrieve. */ + _source_includes?: Fields + /** If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document.
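+ *
+ * For example (illustrative): with the default `detect_noop: true`, resending a
+ * `doc` that matches the stored source yields `result: 'noop'` and skips the write.
+ *
+ * @example
+ * const req: UpdateRequest = {
+ *   index: 'my-index',           // hypothetical
+ *   id: '1',
+ *   doc: { status: 'active' },   // no-op if the stored value already matches
+ *   detect_noop: true
+ * }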
*/ + detect_noop?: boolean + /** A partial update to an existing document. + * If both `doc` and `script` are specified, `doc` is ignored. */ + doc?: TPartialDocument + /** If `true`, use the contents of 'doc' as the value of 'upsert'. + * NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. */ + doc_as_upsert?: boolean + /** The script to run to update the document. */ + script?: Script | ScriptSource + /** If `true`, run the script whether or not the document exists. */ + scripted_upsert?: boolean + /** If `false`, turn off source retrieval. + * You can also specify a comma-separated list of the fields you want to retrieve. */ + _source?: SearchSourceConfig + /** If the document does not already exist, the contents of 'upsert' are inserted as a new document. + * If the document exists, the 'script' is run. */ + upsert?: TDocument + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never } +} + +export type UpdateResponse = UpdateUpdateWriteResponseBase + +export interface UpdateUpdateWriteResponseBase extends WriteResponseBase { + get?: InlineGet +} + +export interface UpdateByQueryRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ + analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ + analyze_wildcard?: boolean + /** The default operator for query string query: `and` or `or`. + * This parameter can be used only when the `q` query string parameter is specified. */ + default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. 
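+ *
+ * For example (illustrative; the index and field names are hypothetical): with
+ * `q: 'active'` and `df: 'status'`, the query string is parsed as if it were
+ * `status:active`.
+ *
+ * @example
+ * const req: UpdateByQueryRequest = {
+ *   index: 'my-index',
+ *   q: 'active',
+ *   df: 'status'   // parsed as status:active
+ * }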
*/ + df?: string + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** Skips the specified number of documents. */ + from?: long + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ + lenient?: boolean + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. + * If a final pipeline is configured it will always run, regardless of the value of this parameter. */ + pipeline?: string + /** The node or shard the operation should be performed on. + * It is random by default. */ + preference?: string + /** A query in the Lucene query string syntax. */ + q?: string + /** If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. + * This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. */ + refresh?: boolean + /** If `true`, the request cache is used for this request. + * It defaults to the index-level setting. */ + request_cache?: boolean + /** The throttle for this request in sub-requests per second. */ + requests_per_second?: float + /** A custom value used to route operations to a specific shard. */ + routing?: Routing + /** The period to retain the search context for scrolling. */ + scroll?: Duration + /** The size of the scroll request that powers the operation. */ + scroll_size?: long + /** An explicit timeout for each search request. + * By default, there is no timeout. */ + search_timeout?: Duration + /** The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ + search_type?: SearchType + /** The number of slices this task should be divided into. */ + slices?: Slices + /** A comma-separated list of `<field>:<direction>` pairs. */ + sort?: string[] + /** The specific `tag` of the request for logging and statistical purposes. */ + stats?: string[] + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ + terminate_after?: long + /** The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. + * By default, it is one minute. + * This guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ + timeout?: Duration + /** If `true`, returns the document version as part of a hit.
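+ *
+ * Putting the parameters together, a fuller request sketch (illustrative):
+ * increment a counter on every matching document and proceed past version
+ * conflicts instead of aborting.
+ *
+ * @example
+ * const req: UpdateByQueryRequest = {
+ *   index: 'my-index',                                    // hypothetical
+ *   query: { term: { status: 'active' } },
+ *   script: { source: 'ctx._source.count++', lang: 'painless' },
+ *   conflicts: 'proceed'
+ * }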
*/ + version?: boolean + /** Should the document increment the version number (internal) on hit or not (reindex) */ + version_type?: boolean + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The `timeout` parameter controls how long each write request waits for unavailable shards to become available. + * Both work exactly the way they work in the bulk API. */ + wait_for_active_shards?: WaitForActiveShards + /** If `true`, the request blocks until the operation is complete. + * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. + * Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. */ + wait_for_completion?: boolean + /** The maximum number of documents to update. */ + max_docs?: long + /** The documents to update using the Query DSL. */ + query?: QueryDslQueryContainer + /** The script to run to update the document source or metadata when updating. */ + script?: Script | ScriptSource + /** Slice the request manually using the provided slice ID and total number of slices. */ + slice?: SlicedScroll + /** The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. */ + conflicts?: Conflicts + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, pipeline?: never, preference?: never, q?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, script?: never, slice?: never, conflicts?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, from?: never, ignore_unavailable?: never, lenient?: never, pipeline?: never, preference?: never, q?: never, refresh?: never, request_cache?: never, requests_per_second?: never, routing?: never, scroll?: never, scroll_size?: never, search_timeout?: never, search_type?: never, slices?: never, sort?: never, stats?: never, terminate_after?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, wait_for_completion?: never, max_docs?: never, query?: never, script?: never, slice?: never, conflicts?: never } +} + +export interface UpdateByQueryResponse { + /** The number of scroll responses pulled back by the update by query. */ + batches?: long + /** Array of failures if there were any unrecoverable errors during the process. + * If this is non-empty then the request ended because of those failures. + * Update by query is implemented using batches. + * Any failure causes the entire process to end, but all failures in the current batch are collected into the array. 
+ * You can use the `conflicts` option to prevent reindex from ending when version conflicts occur. */ + failures?: BulkIndexByScrollFailure[] + /** The number of documents that were ignored because the script used for the update by query returned a noop value for `ctx.op`. */ + noops?: long + /** The number of documents that were successfully deleted. */ + deleted?: long + /** The number of requests per second effectively run during the update by query. */ + requests_per_second?: float + /** The number of retries attempted by update by query. + * `bulk` is the number of bulk actions retried. + * `search` is the number of search actions retried. */ + retries?: Retries + task?: TaskId + /** If true, some requests timed out during the update by query. */ + timed_out?: boolean + /** The number of milliseconds from start to end of the whole operation. */ + took?: DurationValue<UnitMillis> + /** The number of documents that were successfully processed. */ + total?: long + /** The number of documents that were successfully updated. */ + updated?: long + /** The number of version conflicts that the update by query hit. */ + version_conflicts?: long + throttled?: Duration + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ + throttled_millis?: DurationValue<UnitMillis> + throttled_until?: Duration + /** This field should always be equal to zero in an _update_by_query response. + * It only has meaning when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */ + throttled_until_millis?: DurationValue<UnitMillis> +} + +export interface UpdateByQueryRethrottleRequest extends RequestBase { + /** The ID for the task. */ + task_id: Id + /** The throttle for this request in sub-requests per second. + * To turn off throttling, set it to `-1`. */ + requests_per_second?: float + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_id?: never, requests_per_second?: never } +} + +export interface UpdateByQueryRethrottleResponse { + nodes: Record<string, UpdateByQueryRethrottleUpdateByQueryRethrottleNode> +} + +export interface UpdateByQueryRethrottleUpdateByQueryRethrottleNode extends SpecUtilsBaseNode { + tasks: Record<TaskId, TasksTaskInfo> +} + +export interface SpecUtilsBaseNode { + attributes: Record<string, string> + host: Host + ip: Ip + name: Name + roles?: NodeRoles + transport_address: TransportAddress +} + +export type SpecUtilsNullValue = null + +export type SpecUtilsPipeSeparatedFlags<T = unknown> = T | string + +export type SpecUtilsStringified<T = unknown> = T | string + +export type SpecUtilsWithNullValue<T = unknown> = T | SpecUtilsNullValue + +export interface AcknowledgedResponseBase { + /** For a successful response, this value is always true. On failure, an exception is returned instead.
*/ + acknowledged: boolean +} + +export type AggregateName = string + +export interface BulkIndexByScrollFailure { + cause: ErrorCause + id: Id + index: IndexName + status: integer +} + +export interface BulkStats { + total_operations: long + total_time?: Duration + total_time_in_millis: DurationValue<UnitMillis> + total_size?: ByteSize + total_size_in_bytes: long + avg_time?: Duration + avg_time_in_millis: DurationValue<UnitMillis> + avg_size?: ByteSize + avg_size_in_bytes: long +} + +export type ByteSize = long | string + +export type Bytes = 'b' | 'kb' | 'mb' | 'gb' | 'tb' | 'pb' + +export interface CartesianPoint { + x: double + y: double +} + +export type CategoryId = string + +export interface ChunkRescorer { + /** The number of chunks per document to evaluate for reranking. */ + size?: integer + /** Chunking settings to apply */ + chunking_settings?: MappingChunkRescorerChunkingSettings +} + +export type ClusterAlias = string + +export interface ClusterDetails { + status: ClusterSearchStatus + indices: string + took?: DurationValue<UnitMillis> + timed_out: boolean + _shards?: ShardStatistics + failures?: ShardFailure[] +} + +export type ClusterInfoTarget = '_all' | 'http' | 'ingest' | 'thread_pool' | 'script' + +export type ClusterInfoTargets = ClusterInfoTarget | ClusterInfoTarget[] + +export type ClusterSearchStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed' + +export interface ClusterStatistics { + skipped: integer + successful: integer + total: integer + running: integer + partial: integer + failed: integer + details?: Record<ClusterAlias, ClusterDetails> +} + +export type CommonStatsFlag = '_all' | 'store' | 'indexing' | 'get' | 'search' | 'merge' | 'flush' | 'refresh' | 'query_cache' | 'fielddata' | 'docs' | 'warmer' | 'completion' | 'segments' | 'translog' | 'request_cache' | 'recovery' | 'bulk' | 'shard_stats' | 'mappings' | 'dense_vector' | 'sparse_vector' + +export type CommonStatsFlags = CommonStatsFlag | CommonStatsFlag[] + +export interface CompletionStats { + /** Total amount, in bytes, of memory used for completion across all shards assigned to selected nodes. */ + size_in_bytes: long + /** Total amount of memory used for completion across all shards assigned to selected nodes. */ + size?: ByteSize + fields?: Record<Field, FieldSizeUsage> +} + +export type Conflicts = 'abort' | 'proceed' + +export interface CoordsGeoBounds { + top: double + bottom: double + left: double + right: double +} + +export type DFIIndependenceMeasure = 'standardized' | 'saturated' | 'chisquared' + +export type DFRAfterEffect = 'no' | 'b' | 'l' + +export type DFRBasicModel = 'be' | 'd' | 'g' | 'if' | 'in' | 'ine' | 'p' + +export type DataStreamName = string + +export type DataStreamNames = DataStreamName | DataStreamName[] + +export type DateFormat = string + +export type DateMath = string | Date + +export type DateTime = string | EpochTime<UnitMillis> | Date + +export type Distance = string + +export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' + +export interface DocStats { + /** Total number of non-deleted documents across all primary shards assigned to selected nodes. + * This number is based on documents in Lucene segments and may include documents from nested fields. */ + count: long + /** Total number of deleted documents across all primary shards assigned to selected nodes. + * This number is based on documents in Lucene segments. + * Elasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged. */ + deleted?: long + /** Returns the total size in bytes of all documents in this stats.
+ * This value may be more reliable than store_stats.size_in_bytes in estimating the index size. */ + total_size_in_bytes: long + /** The human-readable equivalent of `total_size_in_bytes`. */ + total_size?: ByteSize +} + +export type Duration = string | -1 | 0 + +export type DurationLarge = string + +export type DurationValue<Unit = unknown> = Unit + +export interface ElasticsearchVersionInfo { + /** The Elasticsearch Git commit's date. */ + build_date: DateTime + /** The build flavor. For example, `default`. */ + build_flavor: string + /** The Elasticsearch Git commit's SHA hash. */ + build_hash: string + /** Indicates whether the Elasticsearch build was a snapshot. */ + build_snapshot: boolean + /** The build type that corresponds to how Elasticsearch was installed. + * For example, `docker`, `rpm`, or `tar`. */ + build_type: string + /** The version number of Elasticsearch's underlying Lucene software. */ + lucene_version: VersionString + /** The minimum index version with which the responding node can read from disk. */ + minimum_index_compatibility_version: VersionString + /** The minimum node version with which the responding node can communicate. + * Also the minimum version from which you can perform a rolling upgrade. */ + minimum_wire_compatibility_version: VersionString + /** The Elasticsearch version number. + * + * IMPORTANT: For Serverless deployments, this static value is always `8.11.0` and is used solely for backward compatibility with legacy clients. + * Serverless environments are versionless and automatically upgraded, so this value can be safely ignored. */ + number: string +} + +export interface ElasticsearchVersionMinInfo { + build_flavor: string + minimum_index_compatibility_version: VersionString + minimum_wire_compatibility_version: VersionString + number: string +} + +export interface EmptyObject { +} + +export type EpochTime<Unit = unknown> = Unit + +export interface ErrorCauseKeys { + /** The type of error */ + type: string + /** A human-readable explanation of the error, in English. */ + reason?: string | null + /** The server stack trace. Present only if the `error_trace=true` parameter was sent with the request.
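+ *
+ * A helper sketch (not part of the client) that flattens the nested `caused_by`
+ * chain into one readable list, which is often more useful than the top-level
+ * `reason` alone:
+ *
+ * @example
+ * function causeChain (err: ErrorCause): string[] {
+ *   const reasons: string[] = []
+ *   for (let e: ErrorCause | undefined = err; e != null; e = e.caused_by) {
+ *     reasons.push(`${e.type}: ${e.reason ?? 'unknown'}`)
+ *   }
+ *   return reasons
+ * }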
*/ + stack_trace?: string + caused_by?: ErrorCause + root_cause?: ErrorCause[] + suppressed?: ErrorCause[] +} +export type ErrorCause = ErrorCauseKeys +& { [property: string]: any } + +export interface ErrorResponseBase { + error: ErrorCause + status: integer +} + +export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' + +export type ExpandWildcards = ExpandWildcard | ExpandWildcard[] + +export type Field = string + +export interface FieldMemoryUsage { + memory_size?: ByteSize + memory_size_in_bytes: long +} + +export interface FieldSizeUsage { + size?: ByteSize + size_in_bytes: long +} + +export interface FieldSort { + missing?: AggregationsMissing + mode?: SortMode + nested?: NestedSortValue + order?: SortOrder + unmapped_type?: MappingFieldType + numeric_type?: FieldSortNumericType + format?: string +} + +export type FieldSortNumericType = 'long' | 'double' | 'date' | 'date_nanos' + +export type FieldValue = long | double | string | boolean | null + +export interface FielddataStats { + evictions?: long + memory_size?: ByteSize + memory_size_in_bytes: long + fields?: Record + global_ordinals: GlobalOrdinalsStats +} + +export type Fields = Field | Field[] + +export interface FlushStats { + periodic: long + total: long + total_time?: Duration + total_time_in_millis: DurationValue +} + +export type Fuzziness = string | integer + +export type GeoBounds = CoordsGeoBounds | TopLeftBottomRightGeoBounds | TopRightBottomLeftGeoBounds | WktGeoBounds + +export interface GeoDistanceSortKeys { + mode?: SortMode + distance_type?: GeoDistanceType + ignore_unmapped?: boolean + order?: SortOrder + unit?: DistanceUnit + nested?: NestedSortValue +} +export type GeoDistanceSort = GeoDistanceSortKeys +& { [property: string]: GeoLocation | GeoLocation[] | SortMode | GeoDistanceType | boolean | SortOrder | DistanceUnit | NestedSortValue } + +export type GeoDistanceType = 'arc' | 'plane' + +export type GeoHash = string + +export interface GeoHashLocation { + geohash: GeoHash +} + +export type GeoHashPrecision = integer | string + +export type GeoHexCell = string + +export interface GeoLine { + /** Always `"LineString"` */ + type: string + /** Array of `[lon, lat]` coordinates */ + coordinates: double[][] +} + +export type GeoLocation = LatLonGeoLocation | GeoHashLocation | double[] | string + +export type GeoShape = any + +export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' + +export type GeoTile = string + +export type GeoTilePrecision = integer + +export interface GetStats { + current: long + exists_time?: Duration + exists_time_in_millis: DurationValue + exists_total: long + missing_time?: Duration + missing_time_in_millis: DurationValue + missing_total: long + time?: Duration + time_in_millis: DurationValue + total: long +} + +export interface GlobalOrdinalFieldStats { + build_time_in_millis: UnitMillis + build_time?: string + shard_max_value_count: long +} + +export interface GlobalOrdinalsStats { + build_time_in_millis: UnitMillis + build_time?: string + fields?: Record +} + +export type GrokPattern = string + +export type HealthStatus = 'green' | 'GREEN' | 'yellow' | 'YELLOW' | 'red' | 'RED' | 'unknown' | 'unavailable' + +export type Host = string + +export type HttpHeaders = Record + +export type IBDistribution = 'll' | 'spl' + +export type IBLambda = 'df' | 'ttf' + +export type Id = string + +export type Ids = Id | Id[] + +export type IndexAlias = string + +export type IndexName = string + +export type IndexPattern = string + +export type IndexPatterns = 
IndexPattern[] + +export interface IndexingStats { + index_current: long + delete_current: long + delete_time?: Duration + delete_time_in_millis: DurationValue + delete_total: long + is_throttled: boolean + noop_update_total: long + throttle_time?: Duration + throttle_time_in_millis: DurationValue + index_time?: Duration + index_time_in_millis: DurationValue + index_total: long + index_failed: long + types?: Record + write_load?: double + recent_write_load?: double + peak_write_load?: double +} + +export type Indices = IndexName | IndexName[] + +export interface IndicesOptions { + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only + * missing or closed indices. This behavior applies even if the request targets other open indices. For example, + * a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If true, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ + ignore_throttled?: boolean +} + +export interface IndicesResponseBase extends AcknowledgedResponseBase { + _shards?: ShardStatistics +} + +export interface InlineGetKeys { + fields?: Record + found: boolean + _seq_no?: SequenceNumber + _primary_term?: long + _routing?: Routing + _source?: TDocument +} +export type InlineGet = InlineGetKeys +& { [property: string]: any } + +export interface InnerRetriever { + retriever: RetrieverContainer + weight: float + normalizer: ScoreNormalizer +} + +export type Ip = string + +export interface KnnQuery extends QueryDslQueryBase { + /** The name of the vector field to search against */ + field: Field + /** The query vector */ + query_vector?: QueryVector + /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */ + query_vector_builder?: QueryVectorBuilder + /** The number of nearest neighbor candidates to consider per shard */ + num_candidates?: integer + /** The percentage of vectors to explore per shard while doing knn search with bbq_disk */ + visit_percentage?: float + /** The final number of nearest neighbors to return as top hits */ + k?: integer + /** Filters for the kNN search query */ + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The minimum similarity for a vector to be considered a match */ + similarity?: float + /** Apply oversampling and rescoring to quantized vectors */ + rescore_vector?: RescoreVector +} + +export interface KnnRetriever extends RetrieverBase { + /** The name of the vector field to search against. */ + field: string + /** Query vector. Must have the same number of dimensions as the vector field you are searching against. You must provide a query_vector_builder or query_vector, but not both. */ + query_vector?: QueryVector + /** Defines a model to build a query vector. */ + query_vector_builder?: QueryVectorBuilder + /** Number of nearest neighbors to return as top hits. */ + k: integer + /** Number of nearest neighbor candidates to consider per shard. 
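+ *
+ * A sketch (illustrative): `num_candidates` must be at least `k`; raising it
+ * trades latency for recall. The field name and vector values are hypothetical.
+ *
+ * @example
+ * const retriever: RetrieverContainer = {
+ *   knn: {
+ *     field: 'embedding',                  // dense_vector field
+ *     query_vector: [0.12, -0.08, 0.97],   // must match the field's dimensions
+ *     k: 10,
+ *     num_candidates: 100
+ *   }
+ * }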
*/ + num_candidates: integer + /** The percentage of vectors to explore per shard while doing knn search with bbq_disk */ + visit_percentage?: float + /** The minimum similarity required for a document to be considered a match. */ + similarity?: float + /** Apply oversampling and rescoring to quantized vectors */ + rescore_vector?: RescoreVector +} + +export interface KnnSearch { + /** The name of the vector field to search against */ + field: Field + /** The query vector */ + query_vector?: QueryVector + /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */ + query_vector_builder?: QueryVectorBuilder + /** The final number of nearest neighbors to return as top hits */ + k?: integer + /** The number of nearest neighbor candidates to consider per shard */ + num_candidates?: integer + /** The percentage of vectors to explore per shard while doing knn search with bbq_disk */ + visit_percentage?: float + /** Boost value to apply to kNN scores */ + boost?: float + /** Filters for the kNN search query */ + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The minimum similarity for a vector to be considered a match */ + similarity?: float + /** If defined, each search hit will contain inner hits. */ + inner_hits?: SearchInnerHits + /** Apply oversampling and rescoring to quantized vectors */ + rescore_vector?: RescoreVector +} + +export interface LatLonGeoLocation { + /** Latitude */ + lat: double + /** Longitude */ + lon: double +} + +export type Level = 'cluster' | 'indices' | 'shards' + +export type LifecycleOperationMode = 'RUNNING' | 'STOPPING' | 'STOPPED' + +export interface LinearRetriever extends RetrieverBase { + /** Inner retrievers. */ + retrievers?: InnerRetriever[] + rank_window_size?: integer + query?: string + fields?: string[] + normalizer?: ScoreNormalizer +} + +export type MapboxVectorTiles = ArrayBuffer + +export interface MergesStats { + current: long + current_docs: long + current_size?: string + current_size_in_bytes: long + total: long + total_auto_throttle?: string + total_auto_throttle_in_bytes: long + total_docs: long + total_size?: string + total_size_in_bytes: long + total_stopped_time?: Duration + total_stopped_time_in_millis: DurationValue + total_throttled_time?: Duration + total_throttled_time_in_millis: DurationValue + total_time?: Duration + total_time_in_millis: DurationValue +} + +export type Metadata = Record + +export type MinimumShouldMatch = integer | string + +export type MultiTermQueryRewrite = string + +export type Name = string + +export type Names = Name | Name[] + +export type Namespace = string + +export interface NestedSortValue { + filter?: QueryDslQueryContainer + max_children?: integer + nested?: NestedSortValue + path: Field +} + +export interface NodeAttributes { + /** Lists node attributes. */ + attributes: Record + /** The ephemeral ID of the node. */ + ephemeral_id: Id + /** The unique identifier of the node. */ + id?: NodeId + /** The unique identifier of the node. */ + name: NodeName + /** The host and port where transport HTTP connections are accepted. 
*/ + transport_address: TransportAddress +} + +export type NodeId = string + +export type NodeIds = NodeId | NodeId[] + +export type NodeName = string + +export type NodeRole = 'master' | 'data' | 'data_cold' | 'data_content' | 'data_frozen' | 'data_hot' | 'data_warm' | 'client' | 'ingest' | 'ml' | 'voting_only' | 'transform' | 'remote_cluster_client' | 'coordinating_only' + +export type NodeRoles = NodeRole[] + +export interface NodeShard { + state: IndicesStatsShardRoutingState + primary: boolean + node?: NodeName + shard: integer + index: IndexName + allocation_id?: Record + recovery_source?: Record + unassigned_info?: ClusterAllocationExplainUnassignedInformation + relocating_node?: NodeId | null + relocation_failure_info?: RelocationFailureInfo +} + +export interface NodeStatistics { + failures?: ErrorCause[] + /** Total number of nodes selected by the request. */ + total: integer + /** Number of nodes that responded successfully to the request. */ + successful: integer + /** Number of nodes that rejected the request or failed to respond. If this value is not 0, a reason for the rejection or failure is included in the response. */ + failed: integer +} + +export type NodeStatsLevel = 'node' | 'indices' | 'shards' + +export type Normalization = 'no' | 'h1' | 'h2' | 'h3' | 'z' + +export type OpType = 'index' | 'create' + +export type Password = string + +export type Percentage = string | float + +export interface PinnedRetriever extends RetrieverBase { + /** Inner retriever. */ + retriever: RetrieverContainer + ids?: string[] + docs?: SpecifiedDocument[] + rank_window_size?: integer +} + +export type PipelineName = string + +export interface PluginStats { + classname: string + description: string + elasticsearch_version: VersionString + extended_plugins: string[] + has_native_controller: boolean + java_version: VersionString + name: Name + version: VersionString + licensed: boolean +} + +export type ProjectRouting = string + +export type PropertyName = string + +export interface QueryCacheStats { + /** Total number of entries added to the query cache across all shards assigned to selected nodes. + * This number includes current and evicted entries. */ + cache_count: long + /** Total number of entries currently in the query cache across all shards assigned to selected nodes. */ + cache_size: long + /** Total number of query cache evictions across all shards assigned to selected nodes. */ + evictions: long + /** Total count of query cache hits across all shards assigned to selected nodes. */ + hit_count: long + /** Total amount of memory used for the query cache across all shards assigned to selected nodes. */ + memory_size?: ByteSize + /** Total amount, in bytes, of memory used for the query cache across all shards assigned to selected nodes. */ + memory_size_in_bytes: long + /** Total count of query cache misses across all shards assigned to selected nodes. */ + miss_count: long + /** Total count of hits and misses in the query cache across all shards assigned to selected nodes. */ + total_count: long +} + +export type QueryVector = float[] + +export interface QueryVectorBuilder { + text_embedding?: TextEmbedding +} + +export interface RRFRetriever extends RetrieverBase { + /** A list of child retrievers to specify which sets of returned top documents will have the RRF formula applied to them. Each retriever can optionally include a weight parameter. 
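+ *
+ * A sketch (illustrative) combining a lexical and a vector retriever. With
+ * weights, each document contributes weight / (rank_constant + rank) per child
+ * result set; the plain RRF formula is the weight = 1 case.
+ *
+ * @example
+ * const hybrid: RetrieverContainer = {
+ *   rrf: {
+ *     retrievers: [
+ *       { standard: { query: { match: { title: 'mountain lake' } } } },
+ *       { retriever: { knn: { field: 'embedding', query_vector: [0.1, 0.2], k: 10, num_candidates: 50 } }, weight: 2.0 }
+ *     ],
+ *     rank_constant: 60,
+ *     rank_window_size: 100
+ *   }
+ * }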
*/ + retrievers: RRFRetrieverEntry[] + /** This value determines how much influence documents in individual result sets per query have over the final ranked result set. */ + rank_constant?: integer + /** This value determines the size of the individual result sets per query. */ + rank_window_size?: integer + query?: string + fields?: string[] +} + +export interface RRFRetrieverComponent { + /** The nested retriever configuration. */ + retriever: RetrieverContainer + /** Weight multiplier for this retriever's contribution to the RRF score. Higher values increase influence. Defaults to 1.0 if not specified. Must be non-negative. */ + weight?: float +} + +export type RRFRetrieverEntry = RetrieverContainer | RRFRetrieverComponent + +export interface RankBase { +} + +export interface RankContainer { + /** The reciprocal rank fusion parameters */ + rrf?: RrfRank +} + +export interface RecoveryStats { + current_as_source: long + current_as_target: long + throttle_time?: Duration + throttle_time_in_millis: DurationValue +} + +export type Refresh = boolean | 'true' | 'false' | 'wait_for' + +export interface RefreshStats { + external_total: long + external_total_time_in_millis: DurationValue + listeners: long + total: long + total_time?: Duration + total_time_in_millis: DurationValue +} + +export type RelationName = string + +export interface RelocationFailureInfo { + failed_attempts: integer +} + +export interface RequestBase extends SpecUtilsCommonQueryParameters { +} + +export interface RequestCacheStats { + evictions: long + hit_count: long + memory_size?: string + memory_size_in_bytes: long + miss_count: long +} + +export interface RescoreVector { + /** Applies the specified oversample factor to k on the approximate kNN search */ + oversample: float +} + +export interface RescorerRetriever extends RetrieverBase { + /** Inner retriever. */ + retriever: RetrieverContainer + rescore: SearchRescore | SearchRescore[] +} + +export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' + +export interface Retries { + /** The number of bulk actions retried. */ + bulk: long + /** The number of search actions retried. */ + search: long +} + +export interface RetrieverBase { + /** Query to filter the documents that can match. */ + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */ + min_score?: float + /** Retriever name. */ + _name?: string +} + +export interface RetrieverContainer { + /** A retriever that replaces the functionality of a traditional query. */ + standard?: StandardRetriever + /** A retriever that replaces the functionality of a knn search. */ + knn?: KnnRetriever + /** A retriever that produces top documents from reciprocal rank fusion (RRF). */ + rrf?: RRFRetriever + /** A retriever that reranks the top documents based on a reranking model using the InferenceAPI */ + text_similarity_reranker?: TextSimilarityReranker + /** A retriever that replaces the functionality of a rule query. */ + rule?: RuleRetriever + /** A retriever that re-scores only the results produced by its child retriever. */ + rescorer?: RescorerRetriever + /** A retriever that supports the combination of different retrievers through a weighted linear combination. */ + linear?: LinearRetriever + /** A pinned retriever applies pinned documents to the underlying retriever. + * This retriever will rewrite to a PinnedQueryBuilder. 
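+ *
+ * A sketch (illustrative): pin two documents ahead of the organic results of an
+ * inner standard retriever.
+ *
+ * @example
+ * const pinned: RetrieverContainer = {
+ *   pinned: {
+ *     ids: ['doc-1', 'doc-2'],   // hypothetical pinned document IDs
+ *     retriever: { standard: { query: { match_all: {} } } }
+ *   }
+ * }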
*/ + pinned?: PinnedRetriever +} + +export type Routing = string + +export interface RrfRank { + /** How much influence documents in individual result sets per query have over the final ranked result set */ + rank_constant?: long + /** Size of the individual result sets per query */ + rank_window_size?: long +} + +export interface RuleRetriever extends RetrieverBase { + /** The ruleset IDs containing the rules this retriever is evaluating against. */ + ruleset_ids: Id | Id[] + /** The match criteria that will determine if a rule in the provided rulesets should be applied. */ + match_criteria: any + /** The retriever whose results rules should be applied to. */ + retriever: RetrieverContainer + /** This value determines the size of the individual result set. */ + rank_window_size?: integer +} + +export type ScalarValue = long | double | string | boolean | null + +export type ScoreNormalizer = 'none' | 'minmax' | 'l2_norm' + +export interface ScoreSort { + order?: SortOrder +} + +export interface Script { + /** The script source. */ + source?: ScriptSource + /** The `id` for a stored script. */ + id?: Id + /** Specifies any named parameters that are passed into the script as variables. + * Use parameters instead of hard-coded values to decrease compile time. */ + params?: Record + /** Specifies the language the script is written in. */ + lang?: ScriptLanguage + options?: Record +} + +export interface ScriptField { + script: Script | ScriptSource + ignore_failure?: boolean +} + +export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | string + +export interface ScriptSort { + order?: SortOrder + script: Script | ScriptSource + type?: ScriptSortType + mode?: SortMode + nested?: NestedSortValue +} + +export type ScriptSortType = 'string' | 'number' | 'version' + +export type ScriptSource = string | SearchSearchRequestBody + +export interface ScriptTransform { + lang?: string + params?: Record + source?: ScriptSource + id?: string +} + +export type ScrollId = string + +export type ScrollIds = ScrollId | ScrollId[] + +export interface SearchStats { + fetch_current: long + fetch_time?: Duration + fetch_time_in_millis: DurationValue + fetch_total: long + open_contexts?: long + query_current: long + query_time?: Duration + query_time_in_millis: DurationValue + query_total: long + scroll_current: long + scroll_time?: Duration + scroll_time_in_millis: DurationValue + scroll_total: long + suggest_current: long + suggest_time?: Duration + suggest_time_in_millis: DurationValue + suggest_total: long + recent_search_load?: double + groups?: Record +} + +export interface SearchTransform { + request: WatcherSearchInputRequestDefinition + timeout: Duration +} + +export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' + +export interface SegmentsStats { + /** Total number of segments across all shards assigned to selected nodes. */ + count: integer + /** Total amount of memory used for doc values across all shards assigned to selected nodes. */ + doc_values_memory?: ByteSize + /** Total amount, in bytes, of memory used for doc values across all shards assigned to selected nodes. */ + doc_values_memory_in_bytes: long + /** This object is not populated by the cluster stats API. + * To get information on segment files, use the node stats API. */ + file_sizes: Record + /** Total amount of memory used by fixed bit sets across all shards assigned to selected nodes. + * Fixed bit sets are used for nested object field types and type filters for join fields. 
*/ + fixed_bit_set?: ByteSize + /** Total amount of memory, in bytes, used by fixed bit sets across all shards assigned to selected nodes. */ + fixed_bit_set_memory_in_bytes: long + /** Total amount of memory used by all index writers across all shards assigned to selected nodes. */ + index_writer_memory?: ByteSize + /** Total amount, in bytes, of memory used by all index writers across all shards assigned to selected nodes. */ + index_writer_memory_in_bytes: long + /** Unix timestamp, in milliseconds, of the most recently retried indexing request. */ + max_unsafe_auto_id_timestamp: long + /** Total amount of memory used for segments across all shards assigned to selected nodes. */ + memory?: ByteSize + /** Total amount, in bytes, of memory used for segments across all shards assigned to selected nodes. */ + memory_in_bytes: long + /** Total amount of memory used for normalization factors across all shards assigned to selected nodes. */ + norms_memory?: ByteSize + /** Total amount, in bytes, of memory used for normalization factors across all shards assigned to selected nodes. */ + norms_memory_in_bytes: long + /** Total amount of memory used for points across all shards assigned to selected nodes. */ + points_memory?: ByteSize + /** Total amount, in bytes, of memory used for points across all shards assigned to selected nodes. */ + points_memory_in_bytes: long + /** Total amount, in bytes, of memory used for stored fields across all shards assigned to selected nodes. */ + stored_fields_memory_in_bytes: long + /** Total amount of memory used for stored fields across all shards assigned to selected nodes. */ + stored_fields_memory?: ByteSize + /** Total amount, in bytes, of memory used for terms across all shards assigned to selected nodes. */ + terms_memory_in_bytes: long + /** Total amount of memory used for terms across all shards assigned to selected nodes. */ + terms_memory?: ByteSize + /** Total amount of memory used for term vectors across all shards assigned to selected nodes. */ + term_vectors_memory?: ByteSize + /** Total amount, in bytes, of memory used for term vectors across all shards assigned to selected nodes. */ + term_vectors_memory_in_bytes: long + /** Total amount of memory used by all version maps across all shards assigned to selected nodes. */ + version_map_memory?: ByteSize + /** Total amount, in bytes, of memory used by all version maps across all shards assigned to selected nodes. */ + version_map_memory_in_bytes: long +} + +export type SequenceNumber = long + +export type Service = string + +export interface ShardFailure { + index?: IndexName + /** @alias index */ + _index?: IndexName + node?: string + /** @alias node */ + _node?: string + reason: ErrorCause + shard?: integer + /** @alias shard */ + _shard?: integer + status?: string + primary?: boolean +} + +export interface ShardStatistics { + /** The number of shards the operation or search attempted to run on but failed. */ + failed: uint + /** The number of shards the operation or search succeeded on. */ + successful: uint + /** The number of shards the operation or search will run on overall. 
*/ + total: uint + failures?: ShardFailure[] + skipped?: uint +} + +export interface ShardsOperationResponseBase { + _shards?: ShardStatistics +} + +export interface SlicedScroll { + field?: Field + id: Id + max: integer +} + +export type Slices = integer | SlicesCalculation + +export type SlicesCalculation = 'auto' + +export type Sort = SortCombinations | SortCombinations[] + +export type SortCombinations = Field | SortOptions + +export type SortMode = 'min' | 'max' | 'sum' | 'avg' | 'median' + +export interface SortOptionsKeys { + _score?: ScoreSort + _doc?: ScoreSort + _geo_distance?: GeoDistanceSort + _script?: ScriptSort +} +export type SortOptions = SortOptionsKeys +& { [property: string]: FieldSort | SortOrder | ScoreSort | GeoDistanceSort | ScriptSort } + +export type SortOrder = 'asc' | 'desc' + +export type SortResults = FieldValue[] + +export interface SpecifiedDocument { + index?: IndexName + id: Id +} + +export interface StandardRetriever extends RetrieverBase { + /** Defines a query to retrieve a set of top documents. */ + query?: QueryDslQueryContainer + /** Defines a search after object parameter used for pagination. */ + search_after?: SortResults + /** Maximum number of documents to collect for each shard. */ + terminate_after?: integer + /** A sort object that specifies the order of matching documents. */ + sort?: Sort + /** Collapses the top documents by a specified key into a single top document per key. */ + collapse?: SearchFieldCollapse +} + +export interface StoreStats { + /** Total size of all shards assigned to selected nodes. */ + size?: ByteSize + /** Total size, in bytes, of all shards assigned to selected nodes. */ + size_in_bytes: long + /** A prediction of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */ + reserved?: ByteSize + /** A prediction, in bytes, of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */ + reserved_in_bytes: long + /** Total data set size of all shards assigned to selected nodes. + * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ + total_data_set_size?: ByteSize + /** Total data set size, in bytes, of all shards assigned to selected nodes. + * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ + total_data_set_size_in_bytes?: long +} + +export interface StoredScript { + /** The language the script is written in. + * For search templates, use `mustache`. */ + lang: ScriptLanguage + options?: Record<string, string> + /** The script source. + * For search templates, an object containing the search template. */ + source: ScriptSource +} + +export type StreamResult = ArrayBuffer + +export type SuggestMode = 'missing' | 'popular' | 'always' + +export type SuggestionName = string + +export interface TaskFailure { + task_id: long + node_id: NodeId + status: string + reason: ErrorCause +} + +export type TaskId = string + +export interface TextEmbedding { + /** Model ID is required for all dense_vector fields but + * may be inferred for semantic_text fields */ + model_id?: string + model_text: string +} + +export interface TextSimilarityReranker extends RetrieverBase { + /** The nested retriever which will produce the first-level results that will later be used for reranking.
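+ *
+ * A sketch (illustrative): rerank the top 50 hits of an inner retriever with a
+ * reranking inference endpoint; `my-rerank-endpoint` is a hypothetical endpoint id.
+ *
+ * @example
+ * const reranked: RetrieverContainer = {
+ *   text_similarity_reranker: {
+ *     retriever: { standard: { query: { match: { body: 'how to configure tls' } } } },
+ *     field: 'body',
+ *     inference_id: 'my-rerank-endpoint',
+ *     inference_text: 'how to configure tls',
+ *     rank_window_size: 50
+ *   }
+ * }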
*/ + retriever: RetrieverContainer + /** This value determines how many documents we will consider from the nested retriever. */ + rank_window_size?: integer + /** Unique identifier of the inference endpoint created using the inference API. */ + inference_id?: string + /** The text snippet used as the basis for similarity comparison. */ + inference_text: string + /** The document field to be used for text similarity comparisons. This field should contain the text that will be evaluated against the inference_text. */ + field: string + /** Whether to rescore on only the best matching chunks. + * @beta */ + chunk_rescorer?: ChunkRescorer +} + +export type ThreadType = 'cpu' | 'wait' | 'block' | 'gpu' | 'mem' + +export type TimeOfDay = string + +export type TimeUnit = 'nanos' | 'micros' | 'ms' | 's' | 'm' | 'h' | 'd' + +export type TimeZone = string + +export interface TokenPruningConfig { + /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ + tokens_freq_ratio_threshold?: integer + /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ + tokens_weight_threshold?: float + /** Whether to only score pruned tokens, vs only scoring kept tokens. */ + only_score_pruned_tokens?: boolean +} + +export interface TopLeftBottomRightGeoBounds { + top_left: GeoLocation + bottom_right: GeoLocation +} + +export interface TopRightBottomLeftGeoBounds { + top_right: GeoLocation + bottom_left: GeoLocation +} + +export interface TransformContainer { + chain?: TransformContainer[] + script?: ScriptTransform + search?: SearchTransform +} + +export interface TranslogStats { + earliest_last_modified_age: long + operations: long + size?: string + size_in_bytes: long + uncommitted_operations: integer + uncommitted_size?: string + uncommitted_size_in_bytes: long +} + +export type TransportAddress = string + +export type UnitFloatMillis = double + +export type UnitMillis = long + +export type UnitNanos = long + +export type UnitSeconds = long + +export type Username = string + +export type Uuid = string + +export type VersionNumber = long + +export type VersionString = string + +export type VersionType = 'internal' | 'external' | 'external_gte' + +export type WaitForActiveShardOptions = 'all' | 'index-setting' + +export type WaitForActiveShards = integer | WaitForActiveShardOptions + +export type WaitForEvents = 'immediate' | 'urgent' | 'high' | 'normal' | 'low' | 'languid' + +export interface WarmerStats { + current: long + total: long + total_time?: Duration + total_time_in_millis: DurationValue<UnitMillis> +} + +export interface WktGeoBounds { + wkt: string +} + +export interface WriteResponseBase { + /** The unique identifier for the added document. */ + _id: Id + /** The name of the index the document was added to. */ + _index: IndexName + /** The primary term assigned to the document for the indexing operation. */ + _primary_term?: long + /** The result of the indexing operation: `created` or `updated`. */ + result: Result + /** The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ + _seq_no?: SequenceNumber + /** Information about the replication process of the operation. */ + _shards: ShardStatistics + /** The document version, which is incremented each time the document is updated.
*/ + _version: VersionNumber + /** The role of the failure store in this document response. */ + failure_store?: BulkFailureStoreStatus + forced_refresh?: boolean +} + +export type byte = number + +export type double = number + +export type float = number + +export type integer = number + +export type long = number + +export type short = number + +export type uint = number + +export type ulong = number + +export interface AggregationsAbstractChangePoint { + p_value: double + change_point: integer +} + +export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiBucketAggregateBase<AggregationsAdjacencyMatrixBucket> { +} + +export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { + /** Filters used to create buckets. + * At least one filter is required. */ + filters?: Record<string, QueryDslQueryContainer> + /** Separator used to concatenate filter names. Defaults to &. */ + separator?: string +} + +export interface AggregationsAdjacencyMatrixBucketKeys extends AggregationsMultiBucketBase { + key: string +} +export type AggregationsAdjacencyMatrixBucket = AggregationsAdjacencyMatrixBucketKeys +& { [property: string]: AggregationsAggregate | string | long } + +export type AggregationsAggregate = AggregationsCardinalityAggregate | AggregationsHdrPercentilesAggregate | AggregationsHdrPercentileRanksAggregate | AggregationsTDigestPercentilesAggregate | AggregationsTDigestPercentileRanksAggregate | AggregationsPercentilesBucketAggregate | AggregationsMedianAbsoluteDeviationAggregate | AggregationsMinAggregate | AggregationsMaxAggregate | AggregationsSumAggregate | AggregationsAvgAggregate | AggregationsWeightedAvgAggregate | AggregationsValueCountAggregate | AggregationsSimpleValueAggregate | AggregationsDerivativeAggregate | AggregationsBucketMetricValueAggregate | AggregationsChangePointAggregate | AggregationsStatsAggregate | AggregationsStatsBucketAggregate | AggregationsExtendedStatsAggregate | AggregationsExtendedStatsBucketAggregate | AggregationsCartesianBoundsAggregate | AggregationsCartesianCentroidAggregate | AggregationsGeoBoundsAggregate | AggregationsGeoCentroidAggregate | AggregationsHistogramAggregate | AggregationsDateHistogramAggregate | AggregationsAutoDateHistogramAggregate | AggregationsVariableWidthHistogramAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsLongRareTermsAggregate | AggregationsStringRareTermsAggregate | AggregationsUnmappedRareTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsNestedAggregate | AggregationsReverseNestedAggregate | AggregationsGlobalAggregate | AggregationsFilterAggregate | AggregationsChildrenAggregate | AggregationsParentAggregate | AggregationsSamplerAggregate | AggregationsUnmappedSamplerAggregate | AggregationsGeoHashGridAggregate | AggregationsGeoTileGridAggregate | AggregationsGeoHexGridAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsGeoDistanceAggregate | AggregationsIpRangeAggregate | AggregationsIpPrefixAggregate | AggregationsFiltersAggregate | AggregationsAdjacencyMatrixAggregate | AggregationsSignificantLongTermsAggregate | AggregationsSignificantStringTermsAggregate | AggregationsUnmappedSignificantTermsAggregate | AggregationsCompositeAggregate | AggregationsFrequentItemSetsAggregate | AggregationsTimeSeriesAggregate | AggregationsScriptedMetricAggregate | AggregationsTopHitsAggregate | AggregationsInferenceAggregate | 
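A brief illustration (not part of the diff) of how the adjacency matrix types above are meant to be used, via the `AggregationsAggregationContainer` defined just below; the import path and field values are assumptions for the sketch.

```ts
import type { AggregationsAggregationContainer } from '@elastic/elasticsearch/lib/api/types'

// Two named filters; the response contains buckets for "a", "b", and the
// non-empty intersection "a&b" (using the default `&` separator).
const interactions: AggregationsAggregationContainer = {
  adjacency_matrix: {
    filters: {
      a: { term: { 'labels.keyword': 'alpha' } },
      b: { term: { 'labels.keyword': 'beta' } }
    }
  }
}
```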
AggregationsStringStatsAggregate | AggregationsBoxPlotAggregate | AggregationsTopMetricsAggregate | AggregationsTTestAggregate | AggregationsRateAggregate | AggregationsCumulativeCardinalityAggregate | AggregationsMatrixStatsAggregate | AggregationsGeoLineAggregate + +export interface AggregationsAggregateBase { + meta?: Metadata +} + +export type AggregationsAggregateOrder = Partial<Record<Field, SortOrder>> | Partial<Record<Field, SortOrder>>[] + +export interface AggregationsAggregation { +} + +export interface AggregationsAggregationContainer { + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. */ + aggregations?: Record<string, AggregationsAggregationContainer> + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. + * @alias aggregations */ + aggs?: Record<string, AggregationsAggregationContainer> + meta?: Metadata + /** A bucket aggregation returning a form of adjacency matrix. + * The request provides a collection of named filter expressions, similar to the `filters` aggregation. + * Each bucket in the response represents a non-empty cell in the matrix of intersecting filters. */ + adjacency_matrix?: AggregationsAdjacencyMatrixAggregation + /** A multi-bucket aggregation similar to the date histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */ + auto_date_histogram?: AggregationsAutoDateHistogramAggregation + /** A single-value metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. */ + avg?: AggregationsAverageAggregation + /** A sibling pipeline aggregation which calculates the mean value of a specified metric in a sibling aggregation. + * The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. */ + avg_bucket?: AggregationsAverageBucketAggregation + /** A metrics aggregation that computes a box plot of numeric values extracted from the aggregated documents. */ + boxplot?: AggregationsBoxplotAggregation + /** A parent pipeline aggregation which runs a script which can perform per bucket computations on metrics in the parent multi-bucket aggregation. */ + bucket_script?: AggregationsBucketScriptAggregation + /** A parent pipeline aggregation which runs a script to determine whether the current bucket will be retained in the parent multi-bucket aggregation. */ + bucket_selector?: AggregationsBucketSelectorAggregation + /** A parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. */ + bucket_sort?: AggregationsBucketSortAggregation + /** A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov test ("K-S test") against a provided distribution and the distribution implied by the document counts in the configured sibling aggregation. + * @experimental */ + bucket_count_ks_test?: AggregationsBucketKsAggregation + /** A sibling pipeline aggregation which runs a correlation function on the configured sibling multi-bucket aggregation. + * @experimental */ + bucket_correlation?: AggregationsBucketCorrelationAggregation + /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ + cardinality?: AggregationsCardinalityAggregation + /** A metric aggregation that computes the spatial bounding box containing all values for a Point or Shape field. */ + cartesian_bounds?: AggregationsCartesianBoundsAggregation + /** A metric aggregation that computes the weighted centroid from all coordinate values for point and shape fields.
*/ + cartesian_centroid?: AggregationsCartesianCentroidAggregation + /** A multi-bucket aggregation that groups semi-structured text into buckets. + * @experimental */ + categorize_text?: AggregationsCategorizeTextAggregation + /** A sibling pipeline that detects spikes, dips, and change points in a metric. + * Given a distribution of values provided by the sibling multi-bucket aggregation, + * this aggregation indicates the bucket of any spike or dip and/or the bucket at which + * the largest change in the distribution of values occurred, if it is statistically significant. + * There must be at least 22 bucketed values. Fewer than 1,000 is preferred. */ + change_point?: AggregationsChangePointAggregation + /** A single bucket aggregation that selects child documents that have the specified type, as defined in a `join` field. */ + children?: AggregationsChildrenAggregation + /** A multi-bucket aggregation that creates composite buckets from different sources. + * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ + composite?: AggregationsCompositeAggregation + /** A parent pipeline aggregation which calculates the cumulative cardinality in a parent `histogram` or `date_histogram` aggregation. */ + cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation + /** A parent pipeline aggregation which calculates the cumulative sum of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ + cumulative_sum?: AggregationsCumulativeSumAggregation + /** A multi-bucket values source based aggregation that can be applied on date values or date range values extracted from the documents. + * It dynamically builds fixed size (interval) buckets over the values. */ + date_histogram?: AggregationsDateHistogramAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ + date_range?: AggregationsDateRangeAggregation + /** A parent pipeline aggregation which calculates the derivative of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ + derivative?: AggregationsDerivativeAggregation + /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. + * Similar to the `sampler` aggregation, but adds the ability to limit the number of matches that share a common value. */ + diversified_sampler?: AggregationsDiversifiedSamplerAggregation + /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ + extended_stats?: AggregationsExtendedStatsAggregation + /** A sibling pipeline aggregation which calculates a variety of stats across all buckets of a specified metric in a sibling aggregation. */ + extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation + /** A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. */ + frequent_item_sets?: AggregationsFrequentItemSetsAggregation + /** A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. + * @alias frequent_item_sets */ + frequent_items?: AggregationsFrequentItemSetsAggregation + /** A single bucket aggregation that narrows the set of documents to those that match a query.
*/ + filter?: QueryDslQueryContainer + /** A multi-bucket aggregation where each bucket contains the documents that match a query. */ + filters?: AggregationsFiltersAggregation + /** A metric aggregation that computes the geographic bounding box containing all values for a Geopoint or Geoshape field. */ + geo_bounds?: AggregationsGeoBoundsAggregation + /** A metric aggregation that computes the weighted centroid from all coordinate values for geo fields. */ + geo_centroid?: AggregationsGeoCentroidAggregation + /** A multi-bucket aggregation that works on `geo_point` fields. + * Evaluates the distance of each document value from an origin point and determines the buckets it belongs to, based on ranges defined in the request. */ + geo_distance?: AggregationsGeoDistanceAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell is labeled using a geohash which is of user-definable precision. */ + geohash_grid?: AggregationsGeoHashGridAggregation + /** Aggregates all `geo_point` values within a bucket into a `LineString` ordered by the chosen sort field. */ + geo_line?: AggregationsGeoLineAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell corresponds to a map tile as used by many online map sites. */ + geotile_grid?: AggregationsGeoTileGridAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell corresponds to an H3 cell index and is labeled using the H3Index representation. */ + geohex_grid?: AggregationsGeohexGridAggregation + /** Defines a single bucket of all the documents within the search execution context. + * This context is defined by the indices and the document types you’re searching on, but is not influenced by the search query itself. */ + global?: AggregationsGlobalAggregation + /** A multi-bucket values source based aggregation that can be applied on numeric values or numeric range values extracted from the documents. + * It dynamically builds fixed size (interval) buckets over the values. */ + histogram?: AggregationsHistogramAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of IP ranges - each representing a bucket. */ + ip_range?: AggregationsIpRangeAggregation + /** A bucket aggregation that groups documents based on the network or sub-network of an IP address. */ + ip_prefix?: AggregationsIpPrefixAggregation + /** A parent pipeline aggregation which loads a pre-trained model and performs inference on the collated result fields from the parent bucket aggregation. */ + inference?: AggregationsInferenceAggregation + line?: AggregationsGeoLineAggregation + /** A numeric aggregation that computes the following statistics over a set of document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, `covariance`, and `correlation`. */ + matrix_stats?: AggregationsMatrixStatsAggregation + /** A single-value metrics aggregation that returns the maximum value among the numeric values extracted from the aggregated documents. */ + max?: AggregationsMaxAggregation + /** A sibling pipeline aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s).
*/ + max_bucket?: AggregationsMaxBucketAggregation + /** A single-value aggregation that approximates the median absolute deviation of its search results. */ + median_absolute_deviation?: AggregationsMedianAbsoluteDeviationAggregation + /** A single-value metrics aggregation that returns the minimum value among numeric values extracted from the aggregated documents. */ + min?: AggregationsMinAggregation + /** A sibling pipeline aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */ + min_bucket?: AggregationsMinBucketAggregation + /** A field data based single bucket aggregation that creates a bucket of all documents in the current document set context that are missing a field value (effectively, missing a field or having the configured NULL value set). */ + missing?: AggregationsMissingAggregation + moving_avg?: AggregationsMovingAverageAggregation + /** Given an ordered series of percentiles, "slides" a window across those percentiles and computes cumulative percentiles. */ + moving_percentiles?: AggregationsMovingPercentilesAggregation + /** Given an ordered series of data, "slides" a window across the data and runs a custom script on each window of data. + * For convenience, a number of common functions are predefined such as `min`, `max`, and moving averages. */ + moving_fn?: AggregationsMovingFunctionAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values. */ + multi_terms?: AggregationsMultiTermsAggregation + /** A special single bucket aggregation that enables aggregating nested documents. */ + nested?: AggregationsNestedAggregation + /** A parent pipeline aggregation which calculates the specific normalized/rescaled value for a specific bucket value. */ + normalize?: AggregationsNormalizeAggregation + /** A special single bucket aggregation that selects parent documents that have the specified type, as defined in a `join` field. */ + parent?: AggregationsParentAggregation + /** A multi-value metrics aggregation that calculates one or more percentile ranks over numeric values extracted from the aggregated documents. */ + percentile_ranks?: AggregationsPercentileRanksAggregation + /** A multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents. */ + percentiles?: AggregationsPercentilesAggregation + /** A sibling pipeline aggregation which calculates percentiles across all buckets of a specified metric in a sibling aggregation. */ + percentiles_bucket?: AggregationsPercentilesBucketAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ + range?: AggregationsRangeAggregation + /** A multi-bucket value source based aggregation which finds "rare" terms—terms that are at the long-tail of the distribution and are not frequent. */ + rare_terms?: AggregationsRareTermsAggregation + /** Calculates a rate of documents or a field in each bucket. + * Can only be used inside a `date_histogram` or `composite` aggregation. */ + rate?: AggregationsRateAggregation + /** A special single bucket aggregation that enables aggregating on parent documents from nested documents. + * Should only be defined inside a `nested` aggregation.
*/ + reverse_nested?: AggregationsReverseNestedAggregation + /** A single bucket aggregation that randomly includes documents in the aggregated results. + * Sampling provides significant speed improvement at the cost of accuracy. + * @remarks This property is not supported on Elastic Cloud Serverless. + * @experimental */ + random_sampler?: AggregationsRandomSamplerAggregation + /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. */ + sampler?: AggregationsSamplerAggregation + /** A metric aggregation that uses scripts to provide a metric output. */ + scripted_metric?: AggregationsScriptedMetricAggregation + /** An aggregation that subtracts values in a time series from themselves at different time lags or periods. */ + serial_diff?: AggregationsSerialDifferencingAggregation + /** Returns interesting or unusual occurrences of terms in a set. */ + significant_terms?: AggregationsSignificantTermsAggregation + /** Returns interesting or unusual occurrences of free-text terms in a set. */ + significant_text?: AggregationsSignificantTextAggregation + /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ + stats?: AggregationsStatsAggregation + /** A sibling pipeline aggregation which calculates a variety of stats across all buckets of a specified metric in a sibling aggregation. */ + stats_bucket?: AggregationsStatsBucketAggregation + /** A multi-value metrics aggregation that computes statistics over string values extracted from the aggregated documents. */ + string_stats?: AggregationsStringStatsAggregation + /** A single-value metrics aggregation that sums numeric values that are extracted from the aggregated documents. */ + sum?: AggregationsSumAggregation + /** A sibling pipeline aggregation which calculates the sum of a specified metric across all buckets in a sibling aggregation. */ + sum_bucket?: AggregationsSumBucketAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */ + terms?: AggregationsTermsAggregation + /** The time series aggregation queries data created using a time series index. + * This is typically data such as metrics or other data streams with a time component, and requires creating an index using the time series mode. + * @experimental */ + time_series?: AggregationsTimeSeriesAggregation + /** A metric aggregation that returns the top matching documents per bucket. */ + top_hits?: AggregationsTopHitsAggregation + /** A metrics aggregation that performs a statistical hypothesis test in which the test statistic follows a Student’s t-distribution under the null hypothesis on numeric values extracted from the aggregated documents. */ + t_test?: AggregationsTTestAggregation + /** A metric aggregation that selects metrics from the document with the largest or smallest sort value. */ + top_metrics?: AggregationsTopMetricsAggregation + /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */ + value_count?: AggregationsValueCountAggregation + /** A single-value metrics aggregation that computes the weighted average of numeric values that are extracted from the aggregated documents. */ + weighted_avg?: AggregationsWeightedAverageAggregation + /** A multi-bucket aggregation similar to the histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided.
*/ + variable_width_histogram?: AggregationsVariableWidthHistogramAggregation +} + +export interface AggregationsAggregationRange { + /** Start of the range (inclusive). */ + from?: double | null + /** Custom key to return the range with. */ + key?: string + /** End of the range (exclusive). */ + to?: double | null +} + +export interface AggregationsArrayPercentilesItem { + key: double + value: double | null + value_as_string?: string +} + +export interface AggregationsAutoDateHistogramAggregate extends AggregationsMultiBucketAggregateBase { + interval: DurationLarge +} + +export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase { + /** The target number of buckets. */ + buckets?: integer + /** The field on which to run the aggregation. */ + field?: Field + /** The date format used to format `key_as_string` in the response. + * If no `format` is specified, the first date format specified in the field mapping is used. */ + format?: string + /** The minimum rounding interval. + * This can make the collection process more efficient, as the aggregation will not attempt to round at any interval lower than `minimum_interval`. */ + minimum_interval?: AggregationsMinimumInterval + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: DateTime + /** Time zone specified as an ISO 8601 UTC offset. */ + offset?: string + params?: Record<string, any> + script?: Script | ScriptSource + /** Time zone ID. */ + time_zone?: TimeZone +} + +export interface AggregationsAverageAggregation extends AggregationsFormatMetricAggregationBase { +} + +export interface AggregationsAverageBucketAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsAvgAggregate extends AggregationsSingleMetricAggregateBase { +} + +export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase { + min: double + max: double + q1: double + q2: double + q3: double + lower: double + upper: double + min_as_string?: string + max_as_string?: string + q1_as_string?: string + q2_as_string?: string + q3_as_string?: string + lower_as_string?: string + upper_as_string?: string +} + +export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase { + /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ + compression?: double + /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). + * To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. */ + execution_hint?: AggregationsTDigestExecutionHint +} + +export interface AggregationsBucketAggregationBase { +} + +export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation { + /** The correlation function to execute. */ + function: AggregationsBucketCorrelationFunction +} + +export interface AggregationsBucketCorrelationFunction { + /** The configuration to calculate a count correlation. This function is designed for determining the correlation of a term value and a given metric.
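Since the container above is the shape in which every aggregation request is expressed, a small usage sketch (not part of the diff) may help; it nests an `avg` sub-aggregation under a monthly `date_histogram`. The import path follows the client's generated-types layout and may vary by version.

```ts
import type { AggregationsAggregationContainer } from '@elastic/elasticsearch/lib/api/types'

// One date_histogram bucket per calendar month, each carrying an avg metric.
const monthlySales: AggregationsAggregationContainer = {
  date_histogram: { field: 'order_date', calendar_interval: 'month' },
  aggs: {
    avg_price: { avg: { field: 'price' } }
  }
}
```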
*/ + count_correlation: AggregationsBucketCorrelationFunctionCountCorrelation +} + +export interface AggregationsBucketCorrelationFunctionCountCorrelation { + /** The indicator with which to correlate the configured `bucket_path` values. */ + indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator +} + +export interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator { + /** The total number of documents that initially created the expectations. It’s required to be greater + * than or equal to the sum of all values in the buckets_path as this is the originating superset of data + * to which the term values are correlated. */ + doc_count: integer + /** An array of numbers with which to correlate the configured `bucket_path` values. + * The length of this value must always equal the number of buckets returned by the `bucket_path`. */ + expectations: double[] + /** An array of fractions to use when averaging and calculating variance. This should be used if + * the pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided, + * must equal expectations. */ + fractions?: double[] +} + +export interface AggregationsBucketKsAggregation extends AggregationsBucketPathAggregation { + /** A list of string values indicating which K-S test alternative to calculate. The valid values + * are: "greater", "less", "two_sided". This parameter is key for determining the K-S statistic used + * when calculating the K-S test. Default value is all possible alternative hypotheses. */ + alternative?: string[] + /** A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results. + * In typical usage this is the overall proportion of documents in each bucket, which is compared with the actual + * document proportions in each bucket from the sibling aggregation counts. The default is to assume that overall + * documents are uniformly distributed on these buckets, which they would be if one used equal percentiles of a + * metric to define the bucket end points. */ + fractions?: double[] + /** Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values. + * This determines the cumulative distribution function (CDF) points used comparing the two samples. Default is + * `upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`, + * and `lower_tail`. */ + sampling_method?: string +} + +export interface AggregationsBucketMetricValueAggregate extends AggregationsSingleMetricAggregateBase { + keys: string[] +} + +export interface AggregationsBucketPathAggregation { + /** Path to the buckets that contain one set of values to correlate. */ + buckets_path?: AggregationsBucketsPath +} + +export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { + /** The script to run for this aggregation. */ + script?: Script | ScriptSource +} + +export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase { + /** The script to run for this aggregation. */ + script?: Script | ScriptSource +} + +export interface AggregationsBucketSortAggregation { + /** Buckets in positions prior to `from` will be truncated. */ + from?: integer + /** The policy to apply when gaps are found in the data. */ + gap_policy?: AggregationsGapPolicy + /** The number of buckets to return. + * Defaults to all buckets of the parent aggregation. 
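To make the constraints above concrete, here is a hedged sketch (not part of the diff) of a count-correlation indicator; the numbers are illustrative only, and the import path is assumed.

```ts
import type { AggregationsBucketCorrelationFunctionCountCorrelationIndicator } from '@elastic/elasticsearch/lib/api/types'

// One expectation per bucket returned by `buckets_path`; `doc_count` must be
// at least the sum of the bucket doc counts it originated from.
const indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator = {
  doc_count: 1000,
  expectations: [10, 25, 50, 100]
}
```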
*/ + size?: integer + /** The list of fields to sort on. */ + sort?: Sort +} + +export type AggregationsBuckets<TBucket = unknown> = Record<string, TBucket> | TBucket[] + +export type AggregationsBucketsPath = string | string[] | Record<string, string> + +export type AggregationsCalendarInterval = 'second' | '1s' | 'minute' | '1m' | 'hour' | '1h' | 'day' | '1d' | 'week' | '1w' | 'month' | '1M' | 'quarter' | '1q' | 'year' | '1y' + +export interface AggregationsCardinalityAggregate extends AggregationsAggregateBase { + value: long +} + +export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { + /** A unique count below which counts are expected to be close to accurate. + * This allows memory to be traded for accuracy. */ + precision_threshold?: integer + rehash?: boolean + /** The mechanism by which the cardinality aggregation is run. */ + execution_hint?: AggregationsCardinalityExecutionMode +} + +export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic' + +export interface AggregationsCartesianBoundsAggregate extends AggregationsAggregateBase { + bounds?: TopLeftBottomRightGeoBounds +} + +export interface AggregationsCartesianBoundsAggregation extends AggregationsMetricAggregationBase { +} + +export interface AggregationsCartesianCentroidAggregate extends AggregationsAggregateBase { + count: long + location?: CartesianPoint +} + +export interface AggregationsCartesianCentroidAggregation extends AggregationsMetricAggregationBase { +} + +export interface AggregationsCategorizeTextAggregation { + /** The semi-structured text field to categorize. */ + field: Field + /** The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1. + * Smaller values use less memory and create fewer categories. Larger values will use more memory and + * create narrower categories. Max allowed value is 100. */ + max_unique_tokens?: integer + /** The maximum number of token positions to match on before attempting to merge categories. Larger + * values will use more memory and create narrower categories. Max allowed value is 100. */ + max_matched_tokens?: integer + /** The minimum percentage of tokens that must match for text to be added to the category bucket. Must + * be between 1 and 100. The larger the value the narrower the categories. Larger values will increase memory + * usage and create narrower categories. */ + similarity_threshold?: integer + /** This property expects an array of regular expressions. The expressions are used to filter out matching + * sequences from the categorization field values. You can use this functionality to fine tune the categorization + * by excluding sequences from consideration when categories are defined. For example, you can exclude SQL + * statements that appear in your log files. This property cannot be used at the same time as categorization_analyzer. + * If you only want to define simple regular expression filters that are applied prior to tokenization, setting + * this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, + * use the categorization_analyzer property instead and include the filters as pattern_replace character filters. */ + categorization_filters?: string[] + /** The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. + * The syntax is very similar to that used to define the analyzer in the analyze API. 
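As a usage sketch of the cardinality options above (not part of the diff; field name and import path are assumptions):

```ts
import type { AggregationsAggregationContainer } from '@elastic/elasticsearch/lib/api/types'

// Distinct counts below `precision_threshold` should be near-exact;
// raising the threshold trades memory for accuracy.
const uniqueUsers: AggregationsAggregationContainer = {
  cardinality: { field: 'user.id', precision_threshold: 3000 }
}
```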
This property + * cannot be used at the same time as `categorization_filters`. */ + categorization_analyzer?: AggregationsCategorizeTextAnalyzer + /** The number of categorization buckets to return from each shard before merging all the results. */ + shard_size?: integer + /** The number of buckets to return. */ + size?: integer + /** The minimum number of documents in a bucket to be returned to the results. */ + min_doc_count?: integer + /** The minimum number of documents in a bucket to be returned from the shard before merging. */ + shard_min_doc_count?: integer +} + +export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer + +export interface AggregationsChangePointAggregate extends AggregationsAggregateBase { + type: AggregationsChangeType + bucket?: AggregationsChangePointBucket +} + +export interface AggregationsChangePointAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsChangePointBucketKeys extends AggregationsMultiBucketBase { + key: FieldValue +} +export type AggregationsChangePointBucket = AggregationsChangePointBucketKeys +& { [property: string]: AggregationsAggregate | FieldValue | long } + +export interface AggregationsChangeType { + dip?: AggregationsDip + distribution_change?: AggregationsDistributionChange + indeterminable?: AggregationsIndeterminable + non_stationary?: AggregationsNonStationary + spike?: AggregationsSpike + stationary?: AggregationsStationary + step_change?: AggregationsStepChange + trend_change?: AggregationsTrendChange +} + +export interface AggregationsChiSquareHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ + background_is_superset: boolean + /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ + include_negatives: boolean +} + +export interface AggregationsChildrenAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + +export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { + /** The child type that should be selected. */ + type?: RelationName +} + +export interface AggregationsCompositeAggregate extends AggregationsMultiBucketAggregateBase<AggregationsCompositeBucket> { + after_key?: AggregationsCompositeAggregateKey +} + +export type AggregationsCompositeAggregateKey = Record<Field, FieldValue> + +export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { + /** When paginating, use the `after_key` value returned in the previous response to retrieve the next page. */ + after?: AggregationsCompositeAggregateKey + /** The number of composite buckets that should be returned. */ + size?: integer + /** The value sources used to build composite buckets. + * Keys are returned in the order of the `sources` definition. */ + sources?: Partial<Record<string, AggregationsCompositeAggregationSource>>[] +} + +export interface AggregationsCompositeAggregationBase { + /** Either `field` or `script` must be present */ + field?: Field + missing_bucket?: boolean + missing_order?: AggregationsMissingOrder + /** Either `field` or `script` must be present */ + script?: Script | ScriptSource + value_type?: AggregationsValueType + order?: SortOrder +} + +export interface AggregationsCompositeAggregationSource { + /** A terms aggregation. 
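The composite types above are designed for pagination: each response carries an `after_key`, which is fed back as `after` on the next request. A hedged sketch (not part of the diff; names and import path are assumptions):

```ts
import type {
  AggregationsAggregationContainer,
  AggregationsCompositeAggregateKey
} from '@elastic/elasticsearch/lib/api/types'

// Builds one page of composite buckets; pass the previous response's
// `after_key` to fetch the next page.
function compositePage (after?: AggregationsCompositeAggregateKey): AggregationsAggregationContainer {
  return {
    composite: {
      size: 100,
      sources: [{ by_category: { terms: { field: 'category.keyword' } } }],
      ...(after !== undefined ? { after } : {})
    }
  }
}
```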
*/ + terms?: AggregationsCompositeTermsAggregation + /** A histogram aggregation. */ + histogram?: AggregationsCompositeHistogramAggregation + /** A date histogram aggregation. */ + date_histogram?: AggregationsCompositeDateHistogramAggregation + /** A geotile grid aggregation. */ + geotile_grid?: AggregationsCompositeGeoTileGridAggregation +} + +export interface AggregationsCompositeBucketKeys extends AggregationsMultiBucketBase { + key: AggregationsCompositeAggregateKey +} +export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys +& { [property: string]: AggregationsAggregate | AggregationsCompositeAggregateKey | long } + +export interface AggregationsCompositeDateHistogramAggregation extends AggregationsCompositeAggregationBase { + format?: string + /** Either `calendar_interval` or `fixed_interval` must be present */ + calendar_interval?: DurationLarge + /** Either `calendar_interval` or `fixed_interval` must be present */ + fixed_interval?: DurationLarge + offset?: Duration + time_zone?: TimeZone +} + +export interface AggregationsCompositeGeoTileGridAggregation extends AggregationsCompositeAggregationBase { + precision?: integer + bounds?: GeoBounds +} + +export interface AggregationsCompositeHistogramAggregation extends AggregationsCompositeAggregationBase { + interval: double +} + +export interface AggregationsCompositeTermsAggregation extends AggregationsCompositeAggregationBase { +} + +export interface AggregationsCumulativeCardinalityAggregate extends AggregationsAggregateBase { + value: long + value_as_string?: string +} + +export interface AggregationsCumulativeCardinalityAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsCumulativeSumAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsCustomCategorizeTextAnalyzer { + char_filter?: string[] + tokenizer?: string + filter?: string[] +} + +export interface AggregationsDateHistogramAggregate extends AggregationsMultiBucketAggregateBase<AggregationsDateHistogramBucket> { +} + +export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase { + /** Calendar-aware interval. + * Can be specified using the unit name, such as `month`, or as a single unit quantity, such as `1M`. */ + calendar_interval?: AggregationsCalendarInterval + /** Enables extending the bounds of the histogram beyond the data itself. */ + extended_bounds?: AggregationsExtendedBounds<AggregationsFieldDateMath> + /** Limits the histogram to specified bounds. */ + hard_bounds?: AggregationsExtendedBounds<AggregationsFieldDateMath> + /** The date field whose values are used to build a histogram. */ + field?: Field + /** Fixed intervals: a fixed number of SI units that never deviate, regardless of where they fall on the calendar. */ + fixed_interval?: Duration + /** The date format used to format `key_as_string` in the response. + * If no `format` is specified, the first date format specified in the field mapping is used. */ + format?: string + interval?: Duration + /** Only returns buckets that have `min_doc_count` number of documents. + * By default, all buckets between the first bucket that matches documents and the last one are returned. */ + min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: DateTime + /** Changes the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration. */ + offset?: Duration + /** The sort order of the returned buckets. 
*/ + order?: AggregationsAggregateOrder + params?: Record<string, any> + script?: Script | ScriptSource + /** Time zone used for bucketing and rounding. + * Defaults to Coordinated Universal Time (UTC). */ + time_zone?: TimeZone + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ + keyed?: boolean +} + +export interface AggregationsDateHistogramBucketKeys extends AggregationsMultiBucketBase { + key_as_string?: string + key: EpochTime<UnitMillis> +} +export type AggregationsDateHistogramBucket = AggregationsDateHistogramBucketKeys +& { [property: string]: AggregationsAggregate | string | EpochTime<UnitMillis> | long } + +export interface AggregationsDateRangeAggregate extends AggregationsRangeAggregate { +} + +export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase { + /** The date field whose values are used to build ranges. */ + field?: Field + /** The date format used to format `from` and `to` in the response. */ + format?: string + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: AggregationsMissing + /** Array of date ranges. */ + ranges?: AggregationsDateRangeExpression[] + /** Time zone used to convert dates from another time zone to UTC. */ + time_zone?: TimeZone + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ + keyed?: boolean +} + +export interface AggregationsDateRangeExpression { + /** Start of the range (inclusive). */ + from?: AggregationsFieldDateMath + /** Custom key to return the range with. */ + key?: string + /** End of the range (exclusive). */ + to?: AggregationsFieldDateMath +} + +export interface AggregationsDerivativeAggregate extends AggregationsSingleMetricAggregateBase { + normalized_value?: double + normalized_value_as_string?: string +} + +export interface AggregationsDerivativeAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsDip extends AggregationsAbstractChangePoint { +} + +export interface AggregationsDistributionChange extends AggregationsAbstractChangePoint { +} + +export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase { + /** The type of value used for de-duplication. */ + execution_hint?: AggregationsSamplerAggregationExecutionHint + /** Limits how many documents are permitted per choice of de-duplicating value. */ + max_docs_per_value?: integer + script?: Script | ScriptSource + /** Limits how many top-scoring documents are collected in the sample processed on each shard. */ + shard_size?: integer + /** The field used to provide values used for de-duplication. */ + field?: Field +} + +export interface AggregationsDoubleTermsAggregate extends AggregationsTermsAggregateBase<AggregationsDoubleTermsBucket> { +} + +export interface AggregationsDoubleTermsBucketKeys extends AggregationsTermsBucketBase { + key: double + key_as_string?: string +} +export type AggregationsDoubleTermsBucket = AggregationsDoubleTermsBucketKeys +& { [property: string]: AggregationsAggregate | double | string | long } + +export interface AggregationsEwmaModelSettings { + alpha?: float +} + +export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { + model: 'ewma' + settings: AggregationsEwmaModelSettings +} + +export interface AggregationsExtendedBounds<T> { + /** Maximum value for the bound. */ + max?: T + /** Minimum value for the bound. 
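A short sketch of the date range expressions above (not part of the diff): `from` is inclusive, `to` is exclusive, and date math strings are accepted through `AggregationsFieldDateMath`. The import path is assumed.

```ts
import type { AggregationsDateRangeExpression } from '@elastic/elasticsearch/lib/api/types'

// Two buckets split at "10 months ago, rounded to the month".
const ranges: AggregationsDateRangeExpression[] = [
  { key: 'older', to: 'now-10M/M' },
  { key: 'recent', from: 'now-10M/M' }
]
```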
*/ + min?: T +} + +export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAggregate { + sum_of_squares: double | null + variance: double | null + variance_population: double | null + variance_sampling: double | null + std_deviation: double | null + std_deviation_population: double | null + std_deviation_sampling: double | null + std_deviation_bounds?: AggregationsStandardDeviationBounds + sum_of_squares_as_string?: string + variance_as_string?: string + variance_population_as_string?: string + variance_sampling_as_string?: string + std_deviation_as_string?: string + std_deviation_bounds_as_string?: AggregationsStandardDeviationBoundsAsString +} + +export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase { + /** The number of standard deviations above/below the mean to display. */ + sigma?: double +} + +export interface AggregationsExtendedStatsBucketAggregate extends AggregationsExtendedStatsAggregate { +} + +export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase { + /** The number of standard deviations above/below the mean to display. */ + sigma?: double +} + +export type AggregationsFieldDateMath = DateMath | double + +export interface AggregationsFilterAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsFilterAggregate = AggregationsFilterAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + +export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAggregateBase<AggregationsFiltersBucket> { +} + +export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase { + /** Collection of queries from which to build buckets. */ + filters?: AggregationsBuckets<QueryDslQueryContainer> + /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ + other_bucket?: boolean + /** The key with which the other bucket is returned. */ + other_bucket_key?: string + /** By default, the named filters aggregation returns the buckets as an object. + * Set to `false` to return the buckets as an array of objects. */ + keyed?: boolean +} + +export interface AggregationsFiltersBucketKeys extends AggregationsMultiBucketBase { + key?: string +} +export type AggregationsFiltersBucket = AggregationsFiltersBucketKeys +& { [property: string]: AggregationsAggregate | string | long } + +export interface AggregationsFormatMetricAggregationBase extends AggregationsMetricAggregationBase { + format?: string +} + +export interface AggregationsFormattableMetricAggregation extends AggregationsMetricAggregationBase { + format?: string +} + +export interface AggregationsFrequentItemSetsAggregate extends AggregationsMultiBucketAggregateBase<AggregationsFrequentItemSetsBucket> { +} + +export interface AggregationsFrequentItemSetsAggregation { + /** Fields to analyze. */ + fields: AggregationsFrequentItemSetsField[] + /** The minimum size of one item set. */ + minimum_set_size?: integer + /** The minimum support of one item set. */ + minimum_support?: double + /** The number of top item sets to return. */ + size?: integer + /** Query that filters documents from analysis. 
*/ + filter?: QueryDslQueryContainer +} + +export interface AggregationsFrequentItemSetsBucketKeys extends AggregationsMultiBucketBase { + key: Record<Field, string[]> + support: double +} +export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBucketKeys +& { [property: string]: AggregationsAggregate | Record<Field, string[]> | double | long } + +export interface AggregationsFrequentItemSetsField { + field: Field + /** Values to exclude. + * Can be regular expression strings or arrays of strings of exact terms. */ + exclude?: AggregationsTermsExclude + /** Values to include. + * Can be regular expression strings or arrays of strings of exact terms. */ + include?: AggregationsTermsInclude +} + +export type AggregationsGapPolicy = 'skip' | 'insert_zeros' | 'keep_values' + +export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBase { + bounds?: GeoBounds +} + +export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase { + /** Specifies whether the bounding box should be allowed to overlap the international date line. */ + wrap_longitude?: boolean +} + +export interface AggregationsGeoCentroidAggregate extends AggregationsAggregateBase { + count: long + location?: GeoLocation +} + +export interface AggregationsGeoCentroidAggregation extends AggregationsMetricAggregationBase { + count?: long + location?: GeoLocation +} + +export interface AggregationsGeoDistanceAggregate extends AggregationsRangeAggregate { +} + +export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase { + /** The distance calculation type. */ + distance_type?: GeoDistanceType + /** A field of type `geo_point` used to evaluate the distance. */ + field?: Field + /** The origin used to evaluate the distance. */ + origin?: GeoLocation + /** An array of ranges used to bucket documents. */ + ranges?: AggregationsAggregationRange[] + /** The distance unit. */ + unit?: DistanceUnit +} + +export interface AggregationsGeoHashGridAggregate extends AggregationsMultiBucketAggregateBase<AggregationsGeoHashGridBucket> { +} + +export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase { + /** The bounding box to filter the points in each bucket. */ + bounds?: GeoBounds + /** Field containing indexed `geo_point` or `geo_shape` values. + * If the field contains an array, `geohash_grid` aggregates all array values. */ + field?: Field + /** The string length of the geohashes used to define cells/buckets in the results. */ + precision?: GeoHashPrecision + /** Allows for more accurate counting of the top cells returned in the final result of the aggregation. + * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ + shard_size?: integer + /** The maximum number of geohash buckets to return. 
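A usage sketch of the geo_distance types above (not part of the diff; coordinates, field name, and import path are placeholders):

```ts
import type { AggregationsAggregationContainer } from '@elastic/elasticsearch/lib/api/types'

// Concentric rings around an origin; each range (from inclusive, to
// exclusive) becomes one bucket.
const rings: AggregationsAggregationContainer = {
  geo_distance: {
    field: 'location',
    origin: { lat: 52.376, lon: 4.894 },
    unit: 'km',
    ranges: [{ to: 100 }, { from: 100, to: 300 }, { from: 300 }]
  }
}
```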
*/ + size?: integer +} + +export interface AggregationsGeoHashGridBucketKeys extends AggregationsMultiBucketBase { + key: GeoHash +} +export type AggregationsGeoHashGridBucket = AggregationsGeoHashGridBucketKeys +& { [property: string]: AggregationsAggregate | GeoHash | long } + +export interface AggregationsGeoHexGridAggregate extends AggregationsMultiBucketAggregateBase<AggregationsGeoHexGridBucket> { +} + +export interface AggregationsGeoHexGridBucketKeys extends AggregationsMultiBucketBase { + key: GeoHexCell +} +export type AggregationsGeoHexGridBucket = AggregationsGeoHexGridBucketKeys +& { [property: string]: AggregationsAggregate | GeoHexCell | long } + +export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase { + type: string + geometry: GeoLine + properties: any +} + +export interface AggregationsGeoLineAggregation { + /** The name of the geo_point field. */ + point: AggregationsGeoLinePoint + /** The name of the numeric field to use as the sort key for ordering the points. + * When the `geo_line` aggregation is nested inside a `time_series` aggregation, this field defaults to `@timestamp`, and any other value will result in an error. */ + sort?: AggregationsGeoLineSort + /** When `true`, returns an additional array of the sort values in the feature properties. */ + include_sort?: boolean + /** The order in which the line is sorted (ascending or descending). */ + sort_order?: SortOrder + /** The maximum length of the line represented in the aggregation. + * Valid sizes are between 1 and 10000. */ + size?: integer +} + +export interface AggregationsGeoLinePoint { + /** The name of the geo_point field. */ + field: Field +} + +export interface AggregationsGeoLineSort { + /** The name of the numeric field to use as the sort key for ordering the points. */ + field: Field +} + +export interface AggregationsGeoTileGridAggregate extends AggregationsMultiBucketAggregateBase<AggregationsGeoTileGridBucket> { +} + +export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase { + /** Field containing indexed `geo_point` or `geo_shape` values. + * If the field contains an array, `geotile_grid` aggregates all array values. */ + field?: Field + /** Integer zoom of the key used to define cells/buckets in the results. + * Values outside of the range [0,29] will be rejected. */ + precision?: GeoTilePrecision + /** Allows for more accurate counting of the top cells returned in the final result of the aggregation. + * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ + shard_size?: integer + /** The maximum number of buckets to return. */ + size?: integer + /** A bounding box to filter the geo-points or geo-shapes in each bucket. */ + bounds?: GeoBounds +} + +export interface AggregationsGeoTileGridBucketKeys extends AggregationsMultiBucketBase { + key: GeoTile +} +export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys +& { [property: string]: AggregationsAggregate | GeoTile | long } + +export interface AggregationsGeohexGridAggregation extends AggregationsBucketAggregationBase { + /** Field containing indexed `geo_point` or `geo_shape` values. + * If the field contains an array, `geohex_grid` aggregates all array values. */ + field: Field + /** Integer zoom of the key used to define cells or buckets + * in the results. The value should be between 0 and 15. */ + precision?: integer + /** Bounding box used to filter the geo-points in each bucket. */ + bounds?: GeoBounds + /** Maximum number of buckets to return. 
*/ + size?: integer + /** Number of buckets returned from each shard. */ + shard_size?: integer +} + +export interface AggregationsGlobalAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsGlobalAggregate = AggregationsGlobalAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + +export interface AggregationsGlobalAggregation extends AggregationsBucketAggregationBase { +} + +export interface AggregationsGoogleNormalizedDistanceHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ + background_is_superset?: boolean +} + +export interface AggregationsHdrMethod { + /** Specifies the resolution of values for the histogram in number of significant digits. */ + number_of_significant_value_digits?: integer +} + +export interface AggregationsHdrPercentileRanksAggregate extends AggregationsPercentilesAggregateBase { +} + +export interface AggregationsHdrPercentilesAggregate extends AggregationsPercentilesAggregateBase { +} + +export interface AggregationsHistogramAggregate extends AggregationsMultiBucketAggregateBase<AggregationsHistogramBucket> { +} + +export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase { + /** Enables extending the bounds of the histogram beyond the data itself. */ + extended_bounds?: AggregationsExtendedBounds<double> + /** Limits the range of buckets in the histogram. + * It is particularly useful in the case of open data ranges that can result in a very large number of buckets. */ + hard_bounds?: AggregationsExtendedBounds<double> + /** The name of the field to aggregate on. */ + field?: Field + /** The interval for the buckets. + * Must be a positive decimal. */ + interval?: double + /** Only returns buckets that have `min_doc_count` number of documents. + * By default, the response will fill gaps in the histogram with empty buckets. */ + min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: double + /** By default, the bucket keys start with 0 and then continue in evenly spaced steps of `interval`. + * The bucket boundaries can be shifted by using the `offset` option. */ + offset?: double + /** The sort order of the returned buckets. + * By default, the returned buckets are sorted by their key ascending. */ + order?: AggregationsAggregateOrder + script?: Script | ScriptSource + format?: string + /** If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys. 
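A sketch of the histogram options above (not part of the diff): combining `min_doc_count: 0` with `extended_bounds` forces empty buckets across a fixed range. The import path is assumed.

```ts
import type { AggregationsAggregationContainer } from '@elastic/elasticsearch/lib/api/types'

// Buckets of width 50 from 0 to 500, emitted even when empty.
const priceHistogram: AggregationsAggregationContainer = {
  histogram: {
    field: 'price',
    interval: 50,
    min_doc_count: 0,
    extended_bounds: { min: 0, max: 500 }
  }
}
```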
*/ + keyed?: boolean +} + +export interface AggregationsHistogramBucketKeys extends AggregationsMultiBucketBase { + key_as_string?: string + key: double +} +export type AggregationsHistogramBucket = AggregationsHistogramBucketKeys +& { [property: string]: AggregationsAggregate | string | double | long } + +export interface AggregationsHoltLinearModelSettings { + alpha?: float + beta?: float +} + +export interface AggregationsHoltMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { + model: 'holt' + settings: AggregationsHoltLinearModelSettings +} + +export interface AggregationsHoltWintersModelSettings { + alpha?: float + beta?: float + gamma?: float + pad?: boolean + period?: integer + type?: AggregationsHoltWintersType +} + +export interface AggregationsHoltWintersMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { + model: 'holt_winters' + settings: AggregationsHoltWintersModelSettings +} + +export type AggregationsHoltWintersType = 'add' | 'mult' + +export interface AggregationsIndeterminable { + reason: string +} + +export interface AggregationsInferenceAggregateKeys extends AggregationsAggregateBase { + value?: FieldValue + feature_importance?: AggregationsInferenceFeatureImportance[] + top_classes?: AggregationsInferenceTopClassEntry[] + warning?: string +} +export type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys +& { [property: string]: any } + +export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase { + /** The ID or alias for the trained model. */ + model_id: Name + /** Contains the inference type and its options. */ + inference_config?: AggregationsInferenceConfigContainer +} + +export interface AggregationsInferenceClassImportance { + class_name: string + importance: double +} + +export interface AggregationsInferenceConfigContainer { + /** Regression configuration for inference. */ + regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ + classification?: MlClassificationInferenceOptions +} + +export interface AggregationsInferenceFeatureImportance { + feature_name: string + importance?: double + classes?: AggregationsInferenceClassImportance[] +} + +export interface AggregationsInferenceTopClassEntry { + class_name: FieldValue + class_probability: double + class_score: double +} + +export interface AggregationsIpPrefixAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsIpPrefixAggregation extends AggregationsBucketAggregationBase { + /** The IP address field to aggregate on. The field mapping type must be `ip`. */ + field: Field + /** Length of the network prefix. For IPv4 addresses the accepted range is [0, 32]. + * For IPv6 addresses the accepted range is [0, 128]. */ + prefix_length: integer + /** Defines whether the prefix applies to IPv6 addresses. */ + is_ipv6?: boolean + /** Defines whether the prefix length is appended to IP address keys in the response. */ + append_prefix_length?: boolean + /** Defines whether buckets are returned as a hash rather than an array in the response. */ + keyed?: boolean + /** Minimum number of documents in a bucket for it to be included in the response.
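+ * + * A sketch for illustration, assuming a hypothetical `client_ip` field mapped as `ip`: + * `const prefixes: AggregationsIpPrefixAggregation = { field: 'client_ip', prefix_length: 24, min_doc_count: 1 }`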
*/ + min_doc_count?: long +} + +export interface AggregationsIpPrefixBucketKeys extends AggregationsMultiBucketBase { + is_ipv6: boolean + key: string + prefix_length: integer + netmask?: string +} +export type AggregationsIpPrefixBucket = AggregationsIpPrefixBucketKeys +& { [property: string]: AggregationsAggregate | boolean | string | integer | long } + +export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase { + /** The IP field whose values are used to build ranges. */ + field?: Field + /** Array of IP ranges. */ + ranges?: AggregationsIpRangeAggregationRange[] +} + +export interface AggregationsIpRangeAggregationRange { + /** Start of the range. */ + from?: string | null + /** IP range defined as a CIDR mask. */ + mask?: string + /** End of the range. */ + to?: string | null +} + +export interface AggregationsIpRangeBucketKeys extends AggregationsMultiBucketBase { + key?: string + from?: string + to?: string +} +export type AggregationsIpRangeBucket = AggregationsIpRangeBucketKeys +& { [property: string]: AggregationsAggregate | string | long } + +export type AggregationsKeyedPercentiles = Record<string, string | long | null> + +export interface AggregationsLinearMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { + model: 'linear' + settings: EmptyObject +} + +export interface AggregationsLongRareTermsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsLongRareTermsBucketKeys extends AggregationsMultiBucketBase { + key: long + key_as_string?: string +} +export type AggregationsLongRareTermsBucket = AggregationsLongRareTermsBucketKeys +& { [property: string]: AggregationsAggregate | long | string } + +export interface AggregationsLongTermsAggregate extends AggregationsTermsAggregateBase { +} + +export interface AggregationsLongTermsBucketKeys extends AggregationsTermsBucketBase { + key: long + key_as_string?: string +} +export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys +& { [property: string]: AggregationsAggregate | long | string } + +export interface AggregationsMatrixAggregation { + /** An array of fields for computing the statistics. */ + fields?: Fields + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: Record<Field, double> +} + +export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateBase { + doc_count: long + fields?: AggregationsMatrixStatsFields[] +} + +export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { + /** Array value the aggregation will use for array or multi-valued fields.
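+ * + * For illustration only, with hypothetical numeric `height` and `weight` fields: + * `const matrixStats: AggregationsMatrixStatsAggregation = { fields: ['height', 'weight'], mode: 'avg' }`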
*/ + mode?: SortMode +} + +export interface AggregationsMatrixStatsFields { + name: Field + count: long + mean: double + variance: double + skewness: double + kurtosis: double + covariance: Record<Field, double> + correlation: Record<Field, double> +} + +export interface AggregationsMaxAggregate extends AggregationsSingleMetricAggregateBase { +} + +export interface AggregationsMaxAggregation extends AggregationsFormatMetricAggregationBase { +} + +export interface AggregationsMaxBucketAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsMedianAbsoluteDeviationAggregate extends AggregationsSingleMetricAggregateBase { +} + +export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase { + /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ + compression?: double + /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). + * To use an implementation optimized for accuracy, set this parameter to `high_accuracy` instead. */ + execution_hint?: AggregationsTDigestExecutionHint +} + +export interface AggregationsMetricAggregationBase { + /** The field on which to run the aggregation. */ + field?: Field + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: AggregationsMissing + script?: Script | ScriptSource +} + +export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase { +} + +export interface AggregationsMinAggregation extends AggregationsFormatMetricAggregationBase { +} + +export interface AggregationsMinBucketAggregation extends AggregationsPipelineAggregationBase { +} + +export type AggregationsMinimumInterval = 'second' | 'minute' | 'hour' | 'day' | 'month' | 'year' + +export type AggregationsMissing = string | integer | double | boolean + +export interface AggregationsMissingAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsMissingAggregate = AggregationsMissingAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + +export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase { + /** The name of the field. */ + field?: Field + missing?: AggregationsMissing +} + +export type AggregationsMissingOrder = 'first' | 'last' | 'default' + +export type AggregationsMovingAverageAggregation = AggregationsLinearMovingAverageAggregation | AggregationsSimpleMovingAverageAggregation | AggregationsEwmaMovingAverageAggregation | AggregationsHoltMovingAverageAggregation | AggregationsHoltWintersMovingAverageAggregation + +export interface AggregationsMovingAverageAggregationBase extends AggregationsPipelineAggregationBase { + minimize?: boolean + predict?: integer + window?: integer +} + +export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase { + /** The script that should be executed on each window of data. */ + script?: string + /** By default, the window consists of the last n values excluding the current bucket. + * Increasing `shift` by 1 moves the starting window position by 1 to the right. */ + shift?: integer + /** The size of the window to "slide" across the histogram.
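+ * + * A hedged sketch, assuming a sibling metric named `the_sum` inside a date histogram; `MovingFunctions.unweightedAvg` is one of the built-in Painless moving functions: + * `const movingAvg: AggregationsMovingFunctionAggregation = { buckets_path: 'the_sum', script: 'MovingFunctions.unweightedAvg(values)', window: 10 }`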
*/ + window?: integer +} + +export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase { + /** The size of the window to "slide" across the histogram. */ + window?: integer + /** By default, the window consists of the last n values excluding the current bucket. + * Increasing `shift` by 1 moves the starting window position by 1 to the right. */ + shift?: integer + keyed?: boolean +} + +export interface AggregationsMultiBucketAggregateBase extends AggregationsAggregateBase { + buckets: AggregationsBuckets +} + +export interface AggregationsMultiBucketBase { + doc_count: long +} + +export interface AggregationsMultiTermLookup { + /** A field from which to retrieve terms. */ + field: Field + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: AggregationsMissing +} + +export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggregateBase { +} + +export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase { + /** Specifies the strategy for data collection. */ + collect_mode?: AggregationsTermsAggregationCollectMode + /** Specifies the sort order of the buckets. + * Defaults to sorting by descending document count. */ + order?: AggregationsAggregateOrder + /** The minimum number of documents in a bucket for it to be returned. */ + min_doc_count?: long + /** The minimum number of documents in a bucket on each shard for it to be returned. */ + shard_min_doc_count?: long + /** The number of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ + shard_size?: integer + /** Calculates the doc count error on a per-term basis. */ + show_term_doc_count_error?: boolean + /** The number of term buckets that should be returned out of the overall terms list. */ + size?: integer + /** The field from which to generate sets of terms. */ + terms: AggregationsMultiTermLookup[] +} + +export interface AggregationsMultiTermsBucketKeys extends AggregationsMultiBucketBase { + key: FieldValue[] + key_as_string?: string + doc_count_error_upper_bound?: long +} +export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys +& { [property: string]: AggregationsAggregate | FieldValue[] | string | long } + +export interface AggregationsMutualInformationHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ + background_is_superset?: boolean + /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ + include_negatives?: boolean +} + +export interface AggregationsNestedAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsNestedAggregate = AggregationsNestedAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + +export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { + /** The path to the field of type `nested`. */ + path?: Field +} + +export interface AggregationsNonStationary { + p_value: double + r_value: double + trend: string +} + +export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase { + /** The specific method to apply.
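+ * + * Illustrative only, assuming a sibling `monthly_sales` metric to normalize: + * `const pctOfTotal: AggregationsNormalizeAggregation = { buckets_path: 'monthly_sales', method: 'percent_of_sum' }`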
*/ + method?: AggregationsNormalizeMethod +} + +export type AggregationsNormalizeMethod = 'rescale_0_1' | 'rescale_0_100' | 'percent_of_sum' | 'mean' | 'z-score' | 'softmax' + +export interface AggregationsPValueHeuristic { + background_is_superset?: boolean + /** Whether the results should be normalized when above the given value. + * Allows for consistent significance results at various scales. + * Note: `0` is a special value which means no normalization. */ + normalize_above?: long +} + +export interface AggregationsParentAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsParentAggregate = AggregationsParentAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + +export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase { + /** The child type that should be selected. */ + type?: RelationName +} + +export interface AggregationsPercentageScoreHeuristic { +} + +export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase { + /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. + * Set to `false` to disable this behavior. */ + keyed?: boolean + /** An array of values for which to calculate the percentile ranks. */ + values?: double[] | null + /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentile ranks. */ + hdr?: AggregationsHdrMethod + /** Sets parameters for the default TDigest algorithm used to calculate percentile ranks. */ + tdigest?: AggregationsTDigest +} + +export type AggregationsPercentiles = AggregationsKeyedPercentiles | AggregationsArrayPercentilesItem[] + +export interface AggregationsPercentilesAggregateBase extends AggregationsAggregateBase { + values: AggregationsPercentiles +} + +export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase { + /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. + * Set to `false` to disable this behavior. */ + keyed?: boolean + /** The percentiles to calculate. */ + percents?: double | double[] + /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentiles. */ + hdr?: AggregationsHdrMethod + /** Sets parameters for the default TDigest algorithm used to calculate percentiles. */ + tdigest?: AggregationsTDigest +} + +export interface AggregationsPercentilesBucketAggregate extends AggregationsPercentilesAggregateBase { +} + +export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase { + /** The list of percentiles to calculate. */ + percents?: double[] +} + +export interface AggregationsPipelineAggregationBase extends AggregationsBucketPathAggregation { + /** `DecimalFormat` pattern for the output value. + * If specified, the formatted value is returned in the aggregation’s `value_as_string` property. */ + format?: string + /** Policy to apply when gaps are found in the data. */ + gap_policy?: AggregationsGapPolicy +} + +export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase { + /** The probability that a document will be included in the aggregated data. + * Must be greater than 0, less than 0.5, or exactly 1. + * The lower the probability, the fewer documents are matched. */ + probability: double + /** The seed to generate the random sampling of documents.
+ * When a seed is provided, the random subset of documents is the same between calls. */ + seed?: integer + /** When combined with `seed`, setting `shard_seed` ensures 100% consistent sampling over shards where data is exactly the same. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + shard_seed?: integer +} + +export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase { + /** The field whose values are used to build ranges. */ + field?: Field + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: integer + /** An array of ranges used to bucket documents. */ + ranges?: AggregationsAggregationRange[] + script?: Script | ScriptSource + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ + keyed?: boolean + format?: string +} + +export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase { + from?: double + to?: double + from_as_string?: string + to_as_string?: string + /** The bucket key. Present if the aggregation is _not_ keyed. */ + key?: string +} +export type AggregationsRangeBucket = AggregationsRangeBucketKeys +& { [property: string]: AggregationsAggregate | double | string | long } + +export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase { + /** Terms that should be excluded from the aggregation. */ + exclude?: AggregationsTermsExclude + /** The field from which to return rare terms. */ + field?: Field + /** Terms that should be included in the aggregation. */ + include?: AggregationsTermsInclude + /** The maximum number of documents a term should appear in. */ + max_doc_count?: long + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: AggregationsMissing + /** The precision of the internal CuckooFilters. + * Smaller precision leads to better approximation, but higher memory usage. */ + precision?: double + value_type?: string +} + +export interface AggregationsRateAggregate extends AggregationsAggregateBase { + value: double + value_as_string?: string +} + +export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase { + /** The interval used to calculate the rate. + * By default, the interval of the `date_histogram` is used. */ + unit?: AggregationsCalendarInterval + /** How the rate is calculated. */ + mode?: AggregationsRateMode +} + +export type AggregationsRateMode = 'sum' | 'value_count' + +export interface AggregationsReverseNestedAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + +export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase { + /** Defines the nested object field that should be joined back to. + * The default is empty, which means that it joins back to the root/main document level.
*/ + path?: Field +} + +export interface AggregationsSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + +export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase { + /** Limits how many top-scoring documents are collected in the sample processed on each shard. */ + shard_size?: integer +} + +export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash' + +export interface AggregationsScriptedHeuristic { + script: Script | ScriptSource +} + +export interface AggregationsScriptedMetricAggregate extends AggregationsAggregateBase { + value: any +} + +export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase { + /** Runs once on each shard after document collection is complete. + * Allows the aggregation to consolidate the state returned from each shard. */ + combine_script?: Script | ScriptSource + /** Runs prior to any collection of documents. + * Allows the aggregation to set up any initial state. */ + init_script?: Script | ScriptSource + /** Runs once per document collected. + * If no `combine_script` is specified, the resulting state needs to be stored in the `state` object. */ + map_script?: Script | ScriptSource + /** A global object with script parameters for `init`, `map` and `combine` scripts. + * It is shared between the scripts. */ + params?: Record<string, any> + /** Runs once on the coordinating node after all shards have returned their results. + * The script is provided with access to a variable `states`, which is an array of the result of the `combine_script` on each shard. */ + reduce_script?: Script | ScriptSource +} + +export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase { + /** The historical bucket to subtract from the current value. + * Must be a positive, non-zero integer. */ + lag?: integer +} + +export interface AggregationsSignificantLongTermsAggregate extends AggregationsSignificantTermsAggregateBase { +} + +export interface AggregationsSignificantLongTermsBucketKeys extends AggregationsSignificantTermsBucketBase { + key: long + key_as_string?: string +} +export type AggregationsSignificantLongTermsBucket = AggregationsSignificantLongTermsBucketKeys +& { [property: string]: AggregationsAggregate | long | string | double } + +export interface AggregationsSignificantStringTermsAggregate extends AggregationsSignificantTermsAggregateBase { +} + +export interface AggregationsSignificantStringTermsBucketKeys extends AggregationsSignificantTermsBucketBase { + key: string +} +export type AggregationsSignificantStringTermsBucket = AggregationsSignificantStringTermsBucketKeys +& { [property: string]: AggregationsAggregate | string | double | long } + +export interface AggregationsSignificantTermsAggregateBase extends AggregationsMultiBucketAggregateBase { + bg_count?: long + doc_count?: long +} + +export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase { + /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ + background_filter?: QueryDslQueryContainer + /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ + chi_square?: AggregationsChiSquareHeuristic + /** Terms to exclude.
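+ * + * A minimal sketch for illustration, assuming a hypothetical keyword field `tags`: + * `const sigTerms: AggregationsSignificantTermsAggregation = { field: 'tags', min_doc_count: 5, size: 10 }`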
*/ + exclude?: AggregationsTermsExclude + /** Mechanism by which the aggregation should be executed: using field values directly or using global ordinals. */ + execution_hint?: AggregationsTermsAggregationExecutionHint + /** The field from which to return significant terms. */ + field?: Field + /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. */ + gnd?: AggregationsGoogleNormalizedDistanceHeuristic + /** Terms to include. */ + include?: AggregationsTermsInclude + /** Use JLH score as the significance score. */ + jlh?: EmptyObject + /** Only return terms that are found in more than `min_doc_count` hits. */ + min_doc_count?: long + /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */ + mutual_information?: AggregationsMutualInformationHeuristic + /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ + percentage?: AggregationsPercentageScoreHeuristic + /** Customized score, implemented via a script. */ + script_heuristic?: AggregationsScriptedHeuristic + /** Significant terms heuristic that calculates the p-value between the term existing in foreground and background sets. + * + * The p-value is the probability of obtaining test results at least as extreme as + * the results actually observed, under the assumption that the null hypothesis is + * correct. The p-value is calculated assuming that the foreground set and the + * background set are independent Bernoulli trials (https://en.wikipedia.org/wiki/Bernoulli_trial), with the null + * hypothesis that the probabilities are the same. */ + p_value?: AggregationsPValueHeuristic + /** Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. + * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ + shard_min_doc_count?: long + /** Can be used to control the volumes of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ + shard_size?: integer + /** The number of buckets returned out of the overall terms list. */ + size?: integer +} + +export interface AggregationsSignificantTermsBucketBase extends AggregationsMultiBucketBase { + score: double + bg_count: long +} + +export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase { + /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ + background_filter?: QueryDslQueryContainer + /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ + chi_square?: AggregationsChiSquareHeuristic + /** Values to exclude. */ + exclude?: AggregationsTermsExclude + /** Determines whether the aggregation will use field values directly or global ordinals. */ + execution_hint?: AggregationsTermsAggregationExecutionHint + /** The field from which to return significant text. */ + field?: Field + /** Whether to filter out duplicate text to deal with noisy data. */ + filter_duplicate_text?: boolean + /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score.
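+ * + * For illustration only, assuming a hypothetical analyzed `content` field: + * `const sigText: AggregationsSignificantTextAggregation = { field: 'content', filter_duplicate_text: true }`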
*/ + gnd?: AggregationsGoogleNormalizedDistanceHeuristic + /** Values to include. */ + include?: AggregationsTermsInclude + /** Use JLH score as the significance score. */ + jlh?: EmptyObject + /** Only return values that are found in more than `min_doc_count` hits. */ + min_doc_count?: long + /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */ + mutual_information?: AggregationsMutualInformationHeuristic + /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ + percentage?: AggregationsPercentageScoreHeuristic + /** Customized score, implemented via a script. */ + script_heuristic?: AggregationsScriptedHeuristic + /** Regulates the certainty a shard has if the values should actually be added to the candidate list or not with respect to the min_doc_count. + * Values will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ + shard_min_doc_count?: long + /** The number of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ + shard_size?: integer + /** The number of buckets returned out of the overall terms list. */ + size?: integer + /** Overrides the JSON `_source` fields from which text will be analyzed. */ + source_fields?: Fields +} + +export interface AggregationsSimpleMovingAverageAggregation extends AggregationsMovingAverageAggregationBase { + model: 'simple' + settings: EmptyObject +} + +export interface AggregationsSimpleValueAggregate extends AggregationsSingleMetricAggregateBase { +} + +export interface AggregationsSingleBucketAggregateBase extends AggregationsAggregateBase { + doc_count: long +} + +export interface AggregationsSingleMetricAggregateBase extends AggregationsAggregateBase { + /** The metric value. A missing value generally means that there was no data to aggregate, + * unless specified otherwise. 
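+ * + * Illustrative null handling for any single-metric aggregate (a sketch, not from the spec): + * `const metricOrNull = (agg: AggregationsSumAggregate): double | null => agg.value`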
*/ + value: double | null + value_as_string?: string +} + +export interface AggregationsSpike extends AggregationsAbstractChangePoint { +} + +export interface AggregationsStandardDeviationBounds { + upper: double | null + lower: double | null + upper_population: double | null + lower_population: double | null + upper_sampling: double | null + lower_sampling: double | null +} + +export interface AggregationsStandardDeviationBoundsAsString { + upper: string + lower: string + upper_population: string + lower_population: string + upper_sampling: string + lower_sampling: string +} + +export interface AggregationsStationary { +} + +export interface AggregationsStatsAggregate extends AggregationsAggregateBase { + count: long + min: double | null + max: double | null + avg: double | null + sum: double + min_as_string?: string + max_as_string?: string + avg_as_string?: string + sum_as_string?: string +} + +export interface AggregationsStatsAggregation extends AggregationsFormatMetricAggregationBase { +} + +export interface AggregationsStatsBucketAggregate extends AggregationsStatsAggregate { +} + +export interface AggregationsStatsBucketAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsStepChange extends AggregationsAbstractChangePoint { +} + +export interface AggregationsStringRareTermsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsStringRareTermsBucketKeys extends AggregationsMultiBucketBase { + key: string +} +export type AggregationsStringRareTermsBucket = AggregationsStringRareTermsBucketKeys +& { [property: string]: AggregationsAggregate | string | long } + +export interface AggregationsStringStatsAggregate extends AggregationsAggregateBase { + count: long + min_length: integer | null + max_length: integer | null + avg_length: double | null + entropy: double | null + distribution?: Record<string, double> | null + min_length_as_string?: string + max_length_as_string?: string + avg_length_as_string?: string +} + +export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase { + /** Shows the probability distribution for all characters. */ + show_distribution?: boolean +} + +export interface AggregationsStringTermsAggregate extends AggregationsTermsAggregateBase { +} + +export interface AggregationsStringTermsBucketKeys extends AggregationsTermsBucketBase { + key: FieldValue +} +export type AggregationsStringTermsBucket = AggregationsStringTermsBucketKeys +& { [property: string]: AggregationsAggregate | FieldValue | long } + +export interface AggregationsSumAggregate extends AggregationsSingleMetricAggregateBase { +} + +export interface AggregationsSumAggregation extends AggregationsFormatMetricAggregationBase { +} + +export interface AggregationsSumBucketAggregation extends AggregationsPipelineAggregationBase { +} + +export interface AggregationsTDigest { + /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ + compression?: integer + /** The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). + * To use an implementation optimized for accuracy, set this parameter to `high_accuracy` instead.
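+ * + * A sketch for illustration, assuming a hypothetical numeric `load_time` field: + * `const pct: AggregationsPercentilesAggregation = { field: 'load_time', percents: [95, 99], tdigest: { compression: 200 } }`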
*/ + execution_hint?: AggregationsTDigestExecutionHint +} + +export type AggregationsTDigestExecutionHint = 'default' | 'high_accuracy' + +export interface AggregationsTDigestPercentileRanksAggregate extends AggregationsPercentilesAggregateBase { +} + +export interface AggregationsTDigestPercentilesAggregate extends AggregationsPercentilesAggregateBase { +} + +export interface AggregationsTTestAggregate extends AggregationsAggregateBase { + value: double | null + value_as_string?: string +} + +export interface AggregationsTTestAggregation { + /** Test population A. */ + a?: AggregationsTestPopulation + /** Test population B. */ + b?: AggregationsTestPopulation + /** The type of test. */ + type?: AggregationsTTestType +} + +export type AggregationsTTestType = 'paired' | 'homoscedastic' | 'heteroscedastic' + +export interface AggregationsTermsAggregateBase extends AggregationsMultiBucketAggregateBase { + doc_count_error_upper_bound?: long + sum_other_doc_count?: long +} + +export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase { + /** Determines how child aggregations should be calculated: breadth-first or depth-first. */ + collect_mode?: AggregationsTermsAggregationCollectMode + /** Values to exclude. + * Accepts regular expressions and partitions. */ + exclude?: AggregationsTermsExclude + /** Determines whether the aggregation will use field values directly or global ordinals. */ + execution_hint?: AggregationsTermsAggregationExecutionHint + /** The field from which to return terms. */ + field?: Field + /** Values to include. + * Accepts regular expressions and partitions. */ + include?: AggregationsTermsInclude + /** Only return values that are found in more than `min_doc_count` hits. */ + min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ + missing?: AggregationsMissing + missing_order?: AggregationsMissingOrder + missing_bucket?: boolean + /** Coerces unmapped fields into the specified type. */ + value_type?: string + /** Specifies the sort order of the buckets. + * Defaults to sorting by descending document count. */ + order?: AggregationsAggregateOrder + script?: Script | ScriptSource + /** Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. + * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ + shard_min_doc_count?: long + /** The number of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ + shard_size?: integer + /** Set to `true` to return the `doc_count_error_upper_bound`, which is an upper bound to the error on the `doc_count` returned by each shard. */ + show_term_doc_count_error?: boolean + /** The number of buckets returned out of the overall terms list.
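+ * + * A minimal sketch, for illustration only (hypothetical keyword field `category`): + * `const terms: AggregationsTermsAggregation = { field: 'category', size: 20, order: { _count: 'desc' } }`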
*/ + size?: integer + format?: string +} + +export type AggregationsTermsAggregationCollectMode = 'depth_first' | 'breadth_first' + +export type AggregationsTermsAggregationExecutionHint = 'map' | 'global_ordinals' | 'global_ordinals_hash' | 'global_ordinals_low_cardinality' + +export interface AggregationsTermsBucketBase extends AggregationsMultiBucketBase { + doc_count_error_upper_bound?: long +} + +export type AggregationsTermsExclude = string | string[] + +export type AggregationsTermsInclude = string | string[] | AggregationsTermsPartition + +export interface AggregationsTermsPartition { + /** The number of partitions. */ + num_partitions: long + /** The partition number for this request. */ + partition: long +} + +export interface AggregationsTestPopulation { + /** The field to aggregate. */ + field: Field + script?: Script | ScriptSource + /** A filter used to define a set of records to run unpaired t-test on. */ + filter?: QueryDslQueryContainer +} + +export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase { + /** The maximum number of results to return. */ + size?: integer + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ + keyed?: boolean +} + +export interface AggregationsTimeSeriesBucketKeys extends AggregationsMultiBucketBase { + key: Record<Field, FieldValue> +} +export type AggregationsTimeSeriesBucket = AggregationsTimeSeriesBucketKeys +& { [property: string]: AggregationsAggregate | Record<Field, FieldValue> | long } + +export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase { + hits: SearchHitsMetadata<any> +} + +export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase { + /** Fields for which to return doc values. */ + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** If `true`, returns detailed information about score computation as part of a hit. */ + explain?: boolean + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ + fields?: (QueryDslFieldAndFormat | Field)[] + /** Starting document offset. */ + from?: integer + /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in the search results. */ + highlight?: SearchHighlight + /** Returns the result of one or more script evaluations for each hit. */ + script_fields?: Record<string, ScriptField> + /** The maximum number of top matching hits to return per bucket. */ + size?: integer + /** Sort order of the top matching hits. + * By default, the hits are sorted by the score of the main query. */ + sort?: Sort + /** Selects the fields of the source that are returned. */ + _source?: SearchSourceConfig + /** Returns values for the specified stored fields (fields that use the `store` mapping option). */ + stored_fields?: Fields + /** If `true`, calculates and returns document scores, even if the scores are not used for sorting. */ + track_scores?: boolean + /** If `true`, returns document version as part of a hit. */ + version?: boolean + /** If `true`, returns sequence number and primary term of the last modification of each hit.
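+ * + * Illustrative only, with hypothetical `date` and `title` fields: + * `const topHits: AggregationsTopHitsAggregation = { size: 3, sort: [{ date: 'desc' }], _source: ['title'] }`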
*/ + seq_no_primary_term?: boolean +} + +export interface AggregationsTopMetrics { + sort: (FieldValue | null)[] + metrics: Record<string, FieldValue | null> +} + +export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBase { + top: AggregationsTopMetrics[] +} + +export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { + /** The fields of the top document to return. */ + metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[] + /** The number of top documents from which to return metrics. */ + size?: integer + /** The sort order of the documents. */ + sort?: Sort +} + +export interface AggregationsTopMetricsValue { + /** A field to return as a metric. */ + field: Field +} + +export interface AggregationsTrendChange { + p_value: double + r_value: double + change_point: integer +} + +export interface AggregationsUnmappedRareTermsAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsUnmappedSamplerAggregateKeys extends AggregationsSingleBucketAggregateBase { +} +export type AggregationsUnmappedSamplerAggregate = AggregationsUnmappedSamplerAggregateKeys +& { [property: string]: AggregationsAggregate | long | Metadata } + +export interface AggregationsUnmappedSignificantTermsAggregate extends AggregationsSignificantTermsAggregateBase { +} + +export interface AggregationsUnmappedTermsAggregate extends AggregationsTermsAggregateBase { +} + +export interface AggregationsValueCountAggregate extends AggregationsSingleMetricAggregateBase { +} + +export interface AggregationsValueCountAggregation extends AggregationsFormattableMetricAggregation { +} + +export type AggregationsValueType = 'string' | 'long' | 'double' | 'number' | 'date' | 'date_nanos' | 'ip' | 'numeric' | 'geo_point' | 'boolean' + +export interface AggregationsVariableWidthHistogramAggregate extends AggregationsMultiBucketAggregateBase { +} + +export interface AggregationsVariableWidthHistogramAggregation { + /** The name of the field. */ + field?: Field + /** The target number of buckets. */ + buckets?: integer + /** The number of buckets that the coordinating node will request from each shard. + * Defaults to `buckets * 50`. */ + shard_size?: integer + /** Specifies the number of individual documents that will be stored in memory on a shard before the initial bucketing algorithm is run. + * Defaults to `min(10 * shard_size, 50000)`. */ + initial_buffer?: integer + script?: Script | ScriptSource +} + +export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase { + min: double + key: double + max: double + min_as_string?: string + key_as_string?: string + max_as_string?: string +} +export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidthHistogramBucketKeys +& { [property: string]: AggregationsAggregate | double | string | long } + +export interface AggregationsWeightedAverageAggregation { + /** A numeric response formatter. */ + format?: string + /** Configuration for the field that provides the values. */ + value?: AggregationsWeightedAverageValue + value_type?: AggregationsValueType + /** Configuration for the field or script that provides the weights. */ + weight?: AggregationsWeightedAverageValue +} + +export interface AggregationsWeightedAverageValue { + /** The field from which to extract the values or weights. */ + field?: Field + /** A value or weight to use if the field is missing.
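+ * + * For illustration, assuming hypothetical numeric `grade` and `weight` fields: + * `const weightedAvg: AggregationsWeightedAverageAggregation = { value: { field: 'grade', missing: 2 }, weight: { field: 'weight' } }`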
*/ + missing?: double + script?: Script | ScriptSource +} + +export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { +} + +export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer + +export interface AnalysisApostropheTokenFilter extends AnalysisTokenFilterBase { + type: 'apostrophe' +} + +export interface AnalysisArabicAnalyzer { + type: 'arabic' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisArabicNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'arabic_normalization' +} + +export interface AnalysisArabicStemTokenFilter extends AnalysisTokenFilterBase { + type: 'arabic_stem' +} + +export interface AnalysisArmenianAnalyzer { + type: 'armenian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { + type: 'asciifolding' + /** If `true`, emit both original tokens and folded tokens. Defaults to `false`. 
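+ * + * An illustrative filter definition (a sketch, not from the spec): + * `const folding: AnalysisAsciiFoldingTokenFilter = { type: 'asciifolding', preserve_original: true }`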
*/ + preserve_original?: SpecUtilsStringified<boolean> +} + +export interface AnalysisBasqueAnalyzer { + type: 'basque' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBengaliAnalyzer { + type: 'bengali' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBengaliNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'bengali_normalization' +} + +export interface AnalysisBrazilianAnalyzer { + type: 'brazilian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisBrazilianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'brazilian_stem' +} + +export interface AnalysisBulgarianAnalyzer { + type: 'bulgarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisCatalanAnalyzer { + type: 'catalan' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export type AnalysisCharFilter = string | AnalysisCharFilterDefinition + +export interface AnalysisCharFilterBase { + version?: VersionString +} + +export type AnalysisCharFilterDefinition = AnalysisHtmlStripCharFilter | AnalysisMappingCharFilter | AnalysisPatternReplaceCharFilter | AnalysisIcuNormalizationCharFilter | AnalysisKuromojiIterationMarkCharFilter + +export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase { + type: 'char_group' + tokenize_on_chars: string[] + max_token_length?: integer +} + +export interface AnalysisChineseAnalyzer { + type: 'chinese' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisCjkAnalyzer { + type: 'cjk' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export type AnalysisCjkBigramIgnoredScript = 'han' | 'hangul' | 'hiragana' | 'katakana' + +export interface AnalysisCjkBigramTokenFilter extends AnalysisTokenFilterBase { + type: 'cjk_bigram' + /** Array of character scripts for which to disable bigrams. */ + ignored_scripts?: AnalysisCjkBigramIgnoredScript[] + /** If `true`, emit tokens in both bigram and unigram form. If `false`, a CJK character is output in unigram form when it has no adjacent characters. Defaults to `false`. */ + output_unigrams?: boolean +} + +export interface AnalysisCjkWidthTokenFilter extends AnalysisTokenFilterBase { + type: 'cjk_width' +} + +export interface AnalysisClassicTokenFilter extends AnalysisTokenFilterBase { + type: 'classic' +} + +export interface AnalysisClassicTokenizer extends AnalysisTokenizerBase { + type: 'classic' + max_token_length?: integer +} + +export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { + type: 'common_grams' + /** A list of tokens. The filter generates bigrams for these tokens. + * Either this or the `common_words_path` parameter is required. */ + common_words?: string[] + /** Path to a file containing a list of tokens. The filter generates bigrams for these tokens. + * This path must be absolute or relative to the `config` location. The file must be UTF-8 encoded. Each token in the file must be separated by a line break. + * Either this or the `common_words` parameter is required. */ + common_words_path?: string + /** If `true`, matches for common words are case-insensitive. Defaults to `false`.
*/ + ignore_case?: boolean + /** If `true`, the filter excludes the following tokens from the output: + * - Unigrams for common words + * - Unigrams for terms followed by common words + * Defaults to `false`. We recommend enabling this parameter for search analyzers. */ + query_mode?: boolean +} + +export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilterBase { + /** Maximum subword character length. Longer subword tokens are excluded from the output. Defaults to `15`. */ + max_subword_size?: integer + /** Minimum subword character length. Shorter subword tokens are excluded from the output. Defaults to `2`. */ + min_subword_size?: integer + /** Minimum word character length. Shorter word tokens are excluded from the output. Defaults to `5`. */ + min_word_size?: integer + /** If `true`, only include the longest matching subword. Defaults to `false`. */ + only_longest_match?: boolean + /** A list of subwords to look for in the token stream. If found, the subword is included in the token output. + * Either this parameter or `word_list_path` must be specified. */ + word_list?: string[] + /** Path to a file that contains a list of subwords to find in the token stream. If found, the subword is included in the token output. + * This path must be absolute or relative to the config location, and the file must be UTF-8 encoded. Each token in the file must be separated by a line break. + * Either this parameter or `word_list` must be specified. */ + word_list_path?: string +} + +export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { + type: 'condition' + /** Array of token filters. If a token matches the predicate script in the `script` parameter, these filters are applied to the token in the order provided. */ + filter: string[] + /** Predicate script used to apply token filters. If a token matches this script, the filters in the `filter` parameter are applied to the token. */ + script: Script | ScriptSource +} + +export interface AnalysisCustomAnalyzer { + type: 'custom' + char_filter?: string | string[] + filter?: string | string[] + position_increment_gap?: integer + position_offset_gap?: integer + tokenizer: string +} + +export interface AnalysisCustomNormalizer { + type: 'custom' + char_filter?: string[] + filter?: string[] +} + +export interface AnalysisCzechAnalyzer { + type: 'czech' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisCzechStemTokenFilter extends AnalysisTokenFilterBase { + type: 'czech_stem' +} + +export interface AnalysisDanishAnalyzer { + type: 'danish' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisDecimalDigitTokenFilter extends AnalysisTokenFilterBase { + type: 'decimal_digit' +} + +export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' + +export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { + type: 'delimited_payload' + /** Character used to separate tokens from payloads. Defaults to `|`. */ + delimiter?: string + /** Data type for the stored payload. 
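+ * + * Illustration only: + * `const payloads: AnalysisDelimitedPayloadTokenFilter = { type: 'delimited_payload', delimiter: '|', encoding: 'float' }`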
*/ + encoding?: AnalysisDelimitedPayloadEncoding +} + +export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { + type: 'dictionary_decompounder' +} + +export interface AnalysisDutchAnalyzer { + type: 'dutch' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisDutchStemTokenFilter extends AnalysisTokenFilterBase { + type: 'dutch_stem' +} + +export type AnalysisEdgeNGramSide = 'front' | 'back' + +export interface AnalysisEdgeNGramTokenFilter extends AnalysisTokenFilterBase { + type: 'edge_ngram' + /** Maximum character length of a gram. For custom token filters, defaults to `2`. For the built-in edge_ngram filter, defaults to `1`. */ + max_gram?: integer + /** Minimum character length of a gram. Defaults to `1`. */ + min_gram?: integer + /** Indicates whether to truncate tokens from the `front` or `back`. Defaults to `front`. */ + side?: AnalysisEdgeNGramSide + /** Emits the original token when set to `true`. Defaults to `false`. */ + preserve_original?: SpecUtilsStringified<boolean> +} + +export interface AnalysisEdgeNGramTokenizer extends AnalysisTokenizerBase { + type: 'edge_ngram' + custom_token_chars?: string + max_gram?: integer + min_gram?: integer + token_chars?: AnalysisTokenChar[] +} + +export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { + type: 'elision' + /** List of elisions to remove. + * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed. + * For custom `elision` filters, either this parameter or `articles_path` must be specified. */ + articles?: string[] + /** Path to a file that contains a list of elisions to remove. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each elision in the file must be separated by a line break. + * To be removed, the elision must be at the beginning of a token and be immediately followed by an apostrophe. Both the elision and apostrophe are removed. + * For custom `elision` filters, either this parameter or `articles` must be specified. */ + articles_path?: string + /** If `true`, elision matching is case insensitive. If `false`, elision matching is case sensitive. Defaults to `false`. */ + articles_case?: SpecUtilsStringified<boolean> +} + +export interface AnalysisEnglishAnalyzer { + type: 'english' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisEstonianAnalyzer { + type: 'estonian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisFingerprintAnalyzer { + type: 'fingerprint' + version?: VersionString + /** The maximum token size to emit. Tokens larger than this size will be discarded. + * Defaults to `255`. */ + max_output_size?: integer + /** The character to use to concatenate the terms. + * Defaults to a space. */ + separator?: string + /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. + * Defaults to `_none_`. */ + stopwords?: AnalysisStopWords + /** The path to a file containing stop words. */ + stopwords_path?: string +} + +export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase { + type: 'fingerprint' + /** Maximum character length, including whitespace, of the output token. Defaults to `255`. Concatenated tokens longer than this will result in no token output.
*/ + max_output_size?: integer + /** Character to use to concatenate the token stream input. Defaults to a space. */ + separator?: string +} + +export interface AnalysisFinnishAnalyzer { + type: 'finnish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisFlattenGraphTokenFilter extends AnalysisTokenFilterBase { + type: 'flatten_graph' +} + +export interface AnalysisFrenchAnalyzer { + type: 'french' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisFrenchStemTokenFilter extends AnalysisTokenFilterBase { + type: 'french_stem' +} + +export interface AnalysisGalicianAnalyzer { + type: 'galician' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGermanAnalyzer { + type: 'german' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGermanNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'german_normalization' +} + +export interface AnalysisGermanStemTokenFilter extends AnalysisTokenFilterBase { + type: 'german_stem' +} + +export interface AnalysisGreekAnalyzer { + type: 'greek' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisHindiAnalyzer { + type: 'hindi' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisHindiNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'hindi_normalization' +} + +export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { + type: 'html_strip' + escaped_tags?: string[] +} + +export interface AnalysisHungarianAnalyzer { + type: 'hungarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { + type: 'hunspell' + /** If `true`, duplicate tokens are removed from the filter’s output. Defaults to `true`. */ + dedup?: boolean + /** One or more `.dic` files (e.g., `en_US.dic`, `my_custom.dic`) to use for the Hunspell dictionary. + * By default, the `hunspell` filter uses all `.dic` files in the `<$ES_PATH_CONF>/hunspell/` directory specified using the `lang`, `language`, or `locale` parameter. */ + dictionary?: string + /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. */ + locale: string + /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. + * @alias locale */ + lang: string + /** Locale directory used to specify the `.aff` and `.dic` files for a Hunspell dictionary. + * @alias locale */ + language: string + /** If `true`, only the longest stemmed version of each token is included in the output. If `false`, all stemmed versions of the token are included. Defaults to `false`. */ + longest_only?: boolean +} + +export interface AnalysisHyphenationDecompounderTokenFilter extends AnalysisCompoundWordTokenFilterBase { + type: 'hyphenation_decompounder' + /** Path to an Apache FOP (Formatting Objects Processor) XML hyphenation pattern file. + * This path must be absolute or relative to the `config` location. Only FOP v1.2 compatible files are supported. */ + hyphenation_patterns_path: string + /** If `true`, do not match sub tokens in tokens that are in the word list. Defaults to `false`. */ + no_sub_matches?: boolean + /** If `true`, do not allow overlapping tokens.
Defaults to `false`. */ + no_overlapping_matches?: boolean +} + +export interface AnalysisIcuAnalyzer { + type: 'icu_analyzer' + method: AnalysisIcuNormalizationType + mode: AnalysisIcuNormalizationMode +} + +export type AnalysisIcuCollationAlternate = 'shifted' | 'non-ignorable' + +export type AnalysisIcuCollationCaseFirst = 'lower' | 'upper' + +export type AnalysisIcuCollationDecomposition = 'no' | 'identical' + +export type AnalysisIcuCollationStrength = 'primary' | 'secondary' | 'tertiary' | 'quaternary' | 'identical' + +export interface AnalysisIcuCollationTokenFilter extends AnalysisTokenFilterBase { + type: 'icu_collation' + alternate?: AnalysisIcuCollationAlternate + caseFirst?: AnalysisIcuCollationCaseFirst + caseLevel?: boolean + country?: string + decomposition?: AnalysisIcuCollationDecomposition + hiraganaQuaternaryMode?: boolean + language?: string + numeric?: boolean + rules?: string + strength?: AnalysisIcuCollationStrength + variableTop?: string + variant?: string +} + +export interface AnalysisIcuFoldingTokenFilter extends AnalysisTokenFilterBase { + type: 'icu_folding' + unicode_set_filter: string +} + +export interface AnalysisIcuNormalizationCharFilter extends AnalysisCharFilterBase { + type: 'icu_normalizer' + mode?: AnalysisIcuNormalizationMode + name?: AnalysisIcuNormalizationType + unicode_set_filter?: string +} + +export type AnalysisIcuNormalizationMode = 'decompose' | 'compose' + +export interface AnalysisIcuNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'icu_normalizer' + name: AnalysisIcuNormalizationType +} + +export type AnalysisIcuNormalizationType = 'nfc' | 'nfkc' | 'nfkc_cf' + +export interface AnalysisIcuTokenizer extends AnalysisTokenizerBase { + type: 'icu_tokenizer' + rule_files: string +} + +export type AnalysisIcuTransformDirection = 'forward' | 'reverse' + +export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase { + type: 'icu_transform' + dir?: AnalysisIcuTransformDirection + id: string +} + +export interface AnalysisIndicNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'indic_normalization' +} + +export interface AnalysisIndonesianAnalyzer { + type: 'indonesian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisIrishAnalyzer { + type: 'irish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisItalianAnalyzer { + type: 'italian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisJaStopTokenFilter extends AnalysisTokenFilterBase { + type: 'ja_stop' + stopwords?: AnalysisStopWords +} + +export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { + type: 'kstem' +} + +export type AnalysisKeepTypesMode = 'include' | 'exclude' + +export interface AnalysisKeepTypesTokenFilter extends AnalysisTokenFilterBase { + type: 'keep_types' + /** Indicates whether to keep or remove the specified token types. */ + mode?: AnalysisKeepTypesMode + /** List of token types to keep or remove. */ + types: string[] +} + +export interface AnalysisKeepWordsTokenFilter extends AnalysisTokenFilterBase { + type: 'keep' + /** List of words to keep. Only tokens that match words in this list are included in the output. + * Either this parameter or `keep_words_path` must be specified. */ + keep_words?: string[] + /** If `true`, lowercase all keep words. Defaults to `false`. 
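As an illustration of the `keep_types` shape above, a filter that keeps only numeric tokens; the import path assumes the compiled `lib/` output of this package:

import type { AnalysisKeepTypesTokenFilter } from '@elastic/elasticsearch/lib/api/types'

// Keep only <NUM> tokens; switch mode to 'exclude' to drop them instead.
const numbersOnly: AnalysisKeepTypesTokenFilter = {
  type: 'keep_types',
  mode: 'include',
  types: ['<NUM>']
}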
*/ + keep_words_case?: boolean + /** Path to a file that contains a list of words to keep. Only tokens that match words in this list are included in the output. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break. + * Either this parameter or `keep_words` must be specified. */ + keep_words_path?: string +} + +export interface AnalysisKeywordAnalyzer { + type: 'keyword' + version?: VersionString +} + +export interface AnalysisKeywordMarkerTokenFilter extends AnalysisTokenFilterBase { + type: 'keyword_marker' + /** If `true`, matching for the `keywords` and `keywords_path` parameters ignores letter case. Defaults to `false`. */ + ignore_case?: boolean + /** Array of keywords. Tokens that match these keywords are not stemmed. + * This parameter, `keywords_path`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */ + keywords?: string | string[] + /** Path to a file that contains a list of keywords. Tokens that match these keywords are not stemmed. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each word in the file must be separated by a line break. + * This parameter, `keywords`, or `keywords_pattern` must be specified. You cannot specify this parameter and `keywords_pattern`. */ + keywords_path?: string + /** Java regular expression used to match tokens. Tokens that match this expression are marked as keywords and not stemmed. + * This parameter, `keywords`, or `keywords_path` must be specified. You cannot specify this parameter and `keywords` or `keywords_path`. */ + keywords_pattern?: string +} + +export interface AnalysisKeywordRepeatTokenFilter extends AnalysisTokenFilterBase { + type: 'keyword_repeat' +} + +export interface AnalysisKeywordTokenizer extends AnalysisTokenizerBase { + type: 'keyword' + buffer_size?: integer +} + +export interface AnalysisKuromojiAnalyzer { + type: 'kuromoji' + mode?: AnalysisKuromojiTokenizationMode + user_dictionary?: string +} + +export interface AnalysisKuromojiIterationMarkCharFilter extends AnalysisCharFilterBase { + type: 'kuromoji_iteration_mark' + normalize_kana: boolean + normalize_kanji: boolean +} + +export interface AnalysisKuromojiPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { + type: 'kuromoji_part_of_speech' + stoptags: string[] +} + +export interface AnalysisKuromojiReadingFormTokenFilter extends AnalysisTokenFilterBase { + type: 'kuromoji_readingform' + use_romaji: boolean +} + +export interface AnalysisKuromojiStemmerTokenFilter extends AnalysisTokenFilterBase { + type: 'kuromoji_stemmer' + minimum_length: integer +} + +export type AnalysisKuromojiTokenizationMode = 'normal' | 'search' | 'extended' + +export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase { + type: 'kuromoji_tokenizer' + discard_punctuation?: boolean + mode: AnalysisKuromojiTokenizationMode + nbest_cost?: integer + nbest_examples?: string + user_dictionary?: string + user_dictionary_rules?: string[] + discard_compound_token?: boolean +} + +export interface AnalysisLatvianAnalyzer { + type: 'latvian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { + type: 'length' + /** Maximum character length of a token. Longer tokens are excluded from the output.
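A hedged sketch of the `keyword_marker` shape above, protecting product names from a downstream stemmer (the keyword list is illustrative):

import type { AnalysisKeywordMarkerTokenFilter } from '@elastic/elasticsearch/lib/api/types'

// Exactly one of keywords, keywords_path or keywords_pattern should be
// provided; they are mutually exclusive per the docs above.
const protectedTerms: AnalysisKeywordMarkerTokenFilter = {
  type: 'keyword_marker',
  ignore_case: true,
  keywords: ['elasticsearch', 'kibana']
}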
Defaults to `Integer.MAX_VALUE`, which is `2^31-1` or `2147483647`. */ + max?: integer + /** Minimum character length of a token. Shorter tokens are excluded from the output. Defaults to `0`. */ + min?: integer +} + +export interface AnalysisLetterTokenizer extends AnalysisTokenizerBase { + type: 'letter' +} + +export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterBase { + type: 'limit' + /** If `true`, the limit filter exhausts the token stream, even if the `max_token_count` has already been reached. Defaults to `false`. */ + consume_all_tokens?: boolean + /** Maximum number of tokens to keep. Once this limit is reached, any remaining tokens are excluded from the output. Defaults to `1`. */ + max_token_count?: SpecUtilsStringified<integer> +} + +export interface AnalysisLithuanianAnalyzer { + type: 'lithuanian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisLowercaseNormalizer { + type: 'lowercase' +} + +export interface AnalysisLowercaseTokenFilter extends AnalysisTokenFilterBase { + type: 'lowercase' + /** Language-specific lowercase token filter to use. */ + language?: AnalysisLowercaseTokenFilterLanguages +} + +export type AnalysisLowercaseTokenFilterLanguages = 'greek' | 'irish' | 'turkish' + +export interface AnalysisLowercaseTokenizer extends AnalysisTokenizerBase { + type: 'lowercase' +} + +export interface AnalysisMappingCharFilter extends AnalysisCharFilterBase { + type: 'mapping' + mappings?: string[] + mappings_path?: string +} + +export interface AnalysisMinHashTokenFilter extends AnalysisTokenFilterBase { + type: 'min_hash' + /** Number of buckets to which hashes are assigned. Defaults to `512`. */ + bucket_count?: integer + /** Number of ways to hash each token in the stream. Defaults to `1`. */ + hash_count?: integer + /** Number of hashes to keep from each bucket. Defaults to `1`. + * Hashes are retained by ascending size, starting with the bucket’s smallest hash first. */ + hash_set_size?: integer + /** If `true`, the filter fills empty buckets with the value of the first non-empty bucket to its circular right if the `hash_set_size` is `1`. If the `bucket_count` argument is greater than 1, this parameter defaults to `true`. Otherwise, this parameter defaults to `false`. */ + with_rotation?: boolean +} + +export interface AnalysisMultiplexerTokenFilter extends AnalysisTokenFilterBase { + type: 'multiplexer' + /** A list of token filters to apply to incoming tokens. */ + filters: string[] + /** If `true` (the default) then emit the original token in addition to the filtered tokens. */ + preserve_original?: SpecUtilsStringified<boolean> +} + +export interface AnalysisNGramTokenFilter extends AnalysisTokenFilterBase { + type: 'ngram' + /** Maximum length of characters in a gram. Defaults to `2`. */ + max_gram?: integer + /** Minimum length of characters in a gram. Defaults to `1`. */ + min_gram?: integer + /** Emits original token when set to `true`. Defaults to `false`.
*/ + preserve_original?: SpecUtilsStringified<boolean> +} + +export interface AnalysisNGramTokenizer extends AnalysisTokenizerBase { + type: 'ngram' + custom_token_chars?: string + max_gram?: integer + min_gram?: integer + token_chars?: AnalysisTokenChar[] +} + +export interface AnalysisNoriAnalyzer { + type: 'nori' + version?: VersionString + decompound_mode?: AnalysisNoriDecompoundMode + stoptags?: string[] + user_dictionary?: string +} + +export type AnalysisNoriDecompoundMode = 'discard' | 'none' | 'mixed' + +export interface AnalysisNoriPartOfSpeechTokenFilter extends AnalysisTokenFilterBase { + type: 'nori_part_of_speech' + /** An array of part-of-speech tags that should be removed. */ + stoptags?: string[] +} + +export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { + type: 'nori_tokenizer' + decompound_mode?: AnalysisNoriDecompoundMode + discard_punctuation?: boolean + user_dictionary?: string + user_dictionary_rules?: string[] +} + +export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer + +export interface AnalysisNorwegianAnalyzer { + type: 'norwegian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { + type: 'path_hierarchy' + buffer_size?: SpecUtilsStringified<integer> + delimiter?: string + replacement?: string + reverse?: SpecUtilsStringified<boolean> + skip?: SpecUtilsStringified<integer> +} + +export interface AnalysisPatternAnalyzer { + type: 'pattern' + version?: VersionString + /** Java regular expression flags. Flags should be pipe-separated, e.g. "CASE_INSENSITIVE|COMMENTS". */ + flags?: string + /** Should terms be lowercased or not. + * Defaults to `true`. */ + lowercase?: boolean + /** A Java regular expression. + * Defaults to `\W+`. */ + pattern?: string + /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. + * Defaults to `_none_`. */ + stopwords?: AnalysisStopWords + /** The path to a file containing stop words. */ + stopwords_path?: string +} + +export interface AnalysisPatternCaptureTokenFilter extends AnalysisTokenFilterBase { + type: 'pattern_capture' + /** A list of regular expressions to match. */ + patterns: string[] + /** If set to `true` (the default) it will emit the original token. */ + preserve_original?: SpecUtilsStringified<boolean> +} + +export interface AnalysisPatternReplaceCharFilter extends AnalysisCharFilterBase { + type: 'pattern_replace' + flags?: string + pattern: string + replacement?: string +} + +export interface AnalysisPatternReplaceTokenFilter extends AnalysisTokenFilterBase { + type: 'pattern_replace' + /** If `true`, all substrings matching the pattern parameter’s regular expression are replaced. If `false`, the filter replaces only the first matching substring in each token. Defaults to `true`. */ + all?: boolean + flags?: string + /** Regular expression, written in Java’s regular expression syntax. The filter replaces token substrings matching this pattern with the substring in the `replacement` parameter. */ + pattern: string + /** Replacement substring. Defaults to an empty substring (`""`).
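For example, the `pattern_capture` shape above can emit the parts of an email address as extra tokens while keeping the original; the patterns mirror the example in the Elasticsearch reference:

import type { AnalysisPatternCaptureTokenFilter } from '@elastic/elasticsearch/lib/api/types'

// Emit the local part, letter runs, digit runs, and domain of an email
// address as additional tokens alongside the original token.
const emailParts: AnalysisPatternCaptureTokenFilter = {
  type: 'pattern_capture',
  patterns: ['([^@]+)', '(\\p{L}+)', '(\\d+)', '@(.+)'],
  preserve_original: true
}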
*/ + replacement?: string +} + +export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase { + type: 'pattern' + flags?: string + group?: integer + pattern?: string +} + +export interface AnalysisPersianAnalyzer { + type: 'persian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisPersianNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'persian_normalization' +} + +export interface AnalysisPersianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'persian_stem' +} + +export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' + +export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' + +export type AnalysisPhoneticNameType = 'generic' | 'ashkenazi' | 'sephardic' + +export type AnalysisPhoneticRuleType = 'approx' | 'exact' + +export interface AnalysisPhoneticTokenFilter extends AnalysisTokenFilterBase { + type: 'phonetic' + encoder: AnalysisPhoneticEncoder + languageset?: AnalysisPhoneticLanguage | AnalysisPhoneticLanguage[] + max_code_len?: integer + name_type?: AnalysisPhoneticNameType + replace?: boolean + rule_type?: AnalysisPhoneticRuleType +} + +export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { + type: 'porter_stem' +} + +export interface AnalysisPortugueseAnalyzer { + type: 'portuguese' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { + type: 'predicate_token_filter' + /** Script containing a condition used to filter incoming tokens. Only tokens that match this script are included in the output. */ + script: Script | ScriptSource +} + +export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase { + type: 'remove_duplicates' +} + +export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase { + type: 'reverse' +} + +export interface AnalysisRomanianAnalyzer { + type: 'romanian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisRussianAnalyzer { + type: 'russian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisRussianStemTokenFilter extends AnalysisTokenFilterBase { + type: 'russian_stem' +} + +export interface AnalysisScandinavianFoldingTokenFilter extends AnalysisTokenFilterBase { + type: 'scandinavian_folding' +} + +export interface AnalysisScandinavianNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'scandinavian_normalization' +} + +export interface AnalysisSerbianAnalyzer { + type: 'serbian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisSerbianNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'serbian_normalization' +} + +export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { + type: 'shingle' + /** String used in shingles as a replacement for empty positions that do not contain a token. This filler token is only used in shingles, not original unigrams. Defaults to an underscore (`_`). 
*/ + filler_token?: string + /** Maximum number of tokens to concatenate when creating shingles. Defaults to `2`. */ + max_shingle_size?: SpecUtilsStringified<integer> + /** Minimum number of tokens to concatenate when creating shingles. Defaults to `2`. */ + min_shingle_size?: SpecUtilsStringified<integer> + /** If `true`, the output includes the original input tokens. If `false`, the output only includes shingles; the original input tokens are removed. Defaults to `true`. */ + output_unigrams?: boolean + /** If `true`, the output includes the original input tokens only if no shingles are produced; if shingles are produced, the output only includes shingles. Defaults to `false`. */ + output_unigrams_if_no_shingles?: boolean + /** Separator used to concatenate adjacent tokens to form a shingle. Defaults to a space (`" "`). */ + token_separator?: string +} + +export interface AnalysisSimpleAnalyzer { + type: 'simple' + version?: VersionString +} + +export interface AnalysisSimplePatternSplitTokenizer extends AnalysisTokenizerBase { + type: 'simple_pattern_split' + pattern?: string +} + +export interface AnalysisSimplePatternTokenizer extends AnalysisTokenizerBase { + type: 'simple_pattern' + pattern?: string +} + +export interface AnalysisSnowballAnalyzer { + type: 'snowball' + version?: VersionString + language: AnalysisSnowballLanguage + stopwords?: AnalysisStopWords +} + +export type AnalysisSnowballLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Catalan' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'German' | 'German2' | 'Hungarian' | 'Italian' | 'Irish' | 'Kp' | 'Lithuanian' | 'Lovins' | 'Norwegian' | 'Porter' | 'Portuguese' | 'Romanian' | 'Russian' | 'Serbian' | 'Spanish' | 'Swedish' | 'Turkish' + +export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { + type: 'snowball' + /** Controls the language used by the stemmer. */ + language?: AnalysisSnowballLanguage +} + +export interface AnalysisSoraniAnalyzer { + type: 'sorani' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisSoraniNormalizationTokenFilter extends AnalysisTokenFilterBase { + type: 'sorani_normalization' +} + +export interface AnalysisSpanishAnalyzer { + type: 'spanish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisStandardAnalyzer { + type: 'standard' + /** The maximum token length. If a token is seen that exceeds this length then it is split at `max_token_length` intervals. + * Defaults to `255`. */ + max_token_length?: integer + /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. + * Defaults to `_none_`. */ + stopwords?: AnalysisStopWords + /** The path to a file containing stop words. */ + stopwords_path?: string +} + +export interface AnalysisStandardTokenizer extends AnalysisTokenizerBase { + type: 'standard' + max_token_length?: integer +} + +export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterBase { + type: 'stemmer_override' + /** A list of mapping rules to use. */ + rules?: string[] + /** A path (either relative to `config` location, or absolute) to a list of mappings.
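A small sketch of the shingle filter defined above; since the sizes are stringified integers in the spec, plain numbers type-check:

import type { AnalysisShingleTokenFilter } from '@elastic/elasticsearch/lib/api/types'

// Two- and three-word shingles, keeping the original unigrams as well.
const shingles: AnalysisShingleTokenFilter = {
  type: 'shingle',
  min_shingle_size: 2,
  max_shingle_size: 3,
  output_unigrams: true
}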
*/ + rules_path?: string +} + +export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { + type: 'stemmer' + language?: string + /** @alias language */ + name?: string +} + +export interface AnalysisStopAnalyzer { + type: 'stop' + version?: VersionString + /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. + * Defaults to `_none_`. */ + stopwords?: AnalysisStopWords + /** The path to a file containing stop words. */ + stopwords_path?: string +} + +export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { + type: 'stop' + /** If `true`, stop word matching is case insensitive. For example, if `true`, a stop word of `the` matches and removes `The`, `THE`, or `the`. Defaults to `false`. */ + ignore_case?: boolean + /** If `true`, the last token of a stream is removed if it’s a stop word. Defaults to `true`. */ + remove_trailing?: boolean + /** Language value, such as `_arabic_` or `_thai_`. Defaults to `_english_`. */ + stopwords?: AnalysisStopWords + /** Path to a file that contains a list of stop words to remove. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded. Each stop word in the file must be separated by a line break. */ + stopwords_path?: string +} + +export type AnalysisStopWordLanguage = '_arabic_' | '_armenian_' | '_basque_' | '_bengali_' | '_brazilian_' | '_bulgarian_' | '_catalan_' | '_cjk_' | '_czech_' | '_danish_' | '_dutch_' | '_english_' | '_estonian_' | '_finnish_' | '_french_' | '_galician_' | '_german_' | '_greek_' | '_hindi_' | '_hungarian_' | '_indonesian_' | '_irish_' | '_italian_' | '_latvian_' | '_lithuanian_' | '_norwegian_' | '_persian_' | '_portuguese_' | '_romanian_' | '_russian_' | '_serbian_' | '_sorani_' | '_spanish_' | '_swedish_' | '_thai_' | '_turkish_' | '_none_' + +export type AnalysisStopWords = AnalysisStopWordLanguage | string[] + +export interface AnalysisSwedishAnalyzer { + type: 'swedish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export type AnalysisSynonymFormat = 'solr' | 'wordnet' + +export interface AnalysisSynonymGraphTokenFilter extends AnalysisSynonymTokenFilterBase { + type: 'synonym_graph' +} + +export interface AnalysisSynonymTokenFilter extends AnalysisSynonymTokenFilterBase { + type: 'synonym' +} + +export interface AnalysisSynonymTokenFilterBase extends AnalysisTokenFilterBase { + /** Expands definitions for equivalent synonym rules. Defaults to `true`. */ + expand?: boolean + /** Sets the synonym rules format. */ + format?: AnalysisSynonymFormat + /** If `true` ignores errors while parsing the synonym rules. Note that only synonym rules that cannot be parsed are ignored. Defaults to the value of the `updateable` setting. */ + lenient?: boolean + /** Used to define inline synonyms. */ + synonyms?: string[] + /** Used to provide a synonym file. This path must be absolute or relative to the `config` location. */ + synonyms_path?: string + /** Provide a synonym set created via Synonyms Management APIs. */ + synonyms_set?: string + /** Controls the tokenizers that will be used to tokenize the synonym. This parameter is for backwards compatibility with indices created before 6.0. */ + tokenizer?: string + /** If `true` allows reloading search analyzers to pick up changes to synonym files. Only to be used for search analyzers. Defaults to `false`.
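As a sketch of the synonym filter base above: a search-time `synonym_graph` filter backed by a synonyms set, where `my-synonyms-set` is a placeholder for a set managed through the synonyms APIs:

import type { AnalysisSynonymGraphTokenFilter } from '@elastic/elasticsearch/lib/api/types'

// updateable restricts this filter to search analyzers, allowing the
// synonyms set to be reloaded without reindexing.
const searchSynonyms: AnalysisSynonymGraphTokenFilter = {
  type: 'synonym_graph',
  synonyms_set: 'my-synonyms-set',
  updateable: true
}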
*/ + updateable?: boolean +} + +export interface AnalysisThaiAnalyzer { + type: 'thai' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisThaiTokenizer extends AnalysisTokenizerBase { + type: 'thai' +} + +export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' + +export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition + +export interface AnalysisTokenFilterBase { + version?: VersionString +} + +export type AnalysisTokenFilterDefinition = AnalysisApostropheTokenFilter | AnalysisArabicStemTokenFilter | AnalysisArabicNormalizationTokenFilter | AnalysisAsciiFoldingTokenFilter | AnalysisBengaliNormalizationTokenFilter | AnalysisBrazilianStemTokenFilter | AnalysisCjkBigramTokenFilter | AnalysisCjkWidthTokenFilter | AnalysisClassicTokenFilter | AnalysisCommonGramsTokenFilter | AnalysisConditionTokenFilter | AnalysisCzechStemTokenFilter | AnalysisDecimalDigitTokenFilter | AnalysisDelimitedPayloadTokenFilter | AnalysisDutchStemTokenFilter | AnalysisEdgeNGramTokenFilter | AnalysisElisionTokenFilter | AnalysisFingerprintTokenFilter | AnalysisFlattenGraphTokenFilter | AnalysisFrenchStemTokenFilter | AnalysisGermanNormalizationTokenFilter | AnalysisGermanStemTokenFilter | AnalysisHindiNormalizationTokenFilter | AnalysisHunspellTokenFilter | AnalysisHyphenationDecompounderTokenFilter | AnalysisIndicNormalizationTokenFilter | AnalysisKeepTypesTokenFilter | AnalysisKeepWordsTokenFilter | AnalysisKeywordMarkerTokenFilter | AnalysisKeywordRepeatTokenFilter | AnalysisKStemTokenFilter | AnalysisLengthTokenFilter | AnalysisLimitTokenCountTokenFilter | AnalysisLowercaseTokenFilter | AnalysisMinHashTokenFilter | AnalysisMultiplexerTokenFilter | AnalysisNGramTokenFilter | AnalysisNoriPartOfSpeechTokenFilter | AnalysisPatternCaptureTokenFilter | AnalysisPatternReplaceTokenFilter | AnalysisPersianNormalizationTokenFilter | AnalysisPersianStemTokenFilter | AnalysisPorterStemTokenFilter | AnalysisPredicateTokenFilter | AnalysisRemoveDuplicatesTokenFilter | AnalysisReverseTokenFilter | AnalysisRussianStemTokenFilter | AnalysisScandinavianFoldingTokenFilter | AnalysisScandinavianNormalizationTokenFilter | AnalysisSerbianNormalizationTokenFilter | AnalysisShingleTokenFilter | AnalysisSnowballTokenFilter | AnalysisSoraniNormalizationTokenFilter | AnalysisStemmerOverrideTokenFilter | AnalysisStemmerTokenFilter | AnalysisStopTokenFilter | AnalysisSynonymGraphTokenFilter | AnalysisSynonymTokenFilter | AnalysisTrimTokenFilter | AnalysisTruncateTokenFilter | AnalysisUniqueTokenFilter | AnalysisUppercaseTokenFilter | AnalysisWordDelimiterGraphTokenFilter | AnalysisWordDelimiterTokenFilter | AnalysisJaStopTokenFilter | AnalysisKuromojiStemmerTokenFilter | AnalysisKuromojiReadingFormTokenFilter | AnalysisKuromojiPartOfSpeechTokenFilter | AnalysisIcuCollationTokenFilter | AnalysisIcuFoldingTokenFilter | AnalysisIcuNormalizationTokenFilter | AnalysisIcuTransformTokenFilter | AnalysisPhoneticTokenFilter | AnalysisDictionaryDecompounderTokenFilter + +export type AnalysisTokenizer = string | AnalysisTokenizerDefinition + +export interface AnalysisTokenizerBase { + version?: VersionString +} + +export type AnalysisTokenizerDefinition = AnalysisCharGroupTokenizer | AnalysisClassicTokenizer | AnalysisEdgeNGramTokenizer | AnalysisKeywordTokenizer | AnalysisLetterTokenizer | AnalysisLowercaseTokenizer | AnalysisNGramTokenizer | AnalysisPathHierarchyTokenizer | AnalysisPatternTokenizer | AnalysisSimplePatternTokenizer | 
AnalysisSimplePatternSplitTokenizer | AnalysisStandardTokenizer | AnalysisThaiTokenizer | AnalysisUaxEmailUrlTokenizer | AnalysisWhitespaceTokenizer | AnalysisIcuTokenizer | AnalysisKuromojiTokenizer | AnalysisNoriTokenizer + +export interface AnalysisTrimTokenFilter extends AnalysisTokenFilterBase { + type: 'trim' +} + +export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase { + type: 'truncate' + /** Character limit for each token. Tokens exceeding this limit are truncated. Defaults to `10`. */ + length?: integer +} + +export interface AnalysisTurkishAnalyzer { + type: 'turkish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { + type: 'uax_url_email' + max_token_length?: integer +} + +export interface AnalysisUniqueTokenFilter extends AnalysisTokenFilterBase { + type: 'unique' + /** If `true`, only remove duplicate tokens in the same position. Defaults to `false`. */ + only_on_same_position?: boolean +} + +export interface AnalysisUppercaseTokenFilter extends AnalysisTokenFilterBase { + type: 'uppercase' +} + +export interface AnalysisWhitespaceAnalyzer { + type: 'whitespace' + version?: VersionString +} + +export interface AnalysisWhitespaceTokenizer extends AnalysisTokenizerBase { + type: 'whitespace' + max_token_length?: integer +} + +export interface AnalysisWordDelimiterGraphTokenFilter extends AnalysisWordDelimiterTokenFilterBase { + type: 'word_delimiter_graph' + /** If `true`, the filter adjusts the offsets of split or catenated tokens to better reflect their actual position in the token stream. Defaults to `true`. */ + adjust_offsets?: boolean + /** If `true`, the filter skips tokens with a keyword attribute of true. Defaults to `false`. */ + ignore_keywords?: boolean +} + +export interface AnalysisWordDelimiterTokenFilter extends AnalysisWordDelimiterTokenFilterBase { + type: 'word_delimiter' +} + +export interface AnalysisWordDelimiterTokenFilterBase extends AnalysisTokenFilterBase { + /** If `true`, the filter produces catenated tokens for chains of alphanumeric characters separated by non-alphabetic delimiters. Defaults to `false`. */ + catenate_all?: boolean + /** If `true`, the filter produces catenated tokens for chains of numeric characters separated by non-alphabetic delimiters. Defaults to `false`. */ + catenate_numbers?: boolean + /** If `true`, the filter produces catenated tokens for chains of alphabetical characters separated by non-alphabetic delimiters. Defaults to `false`. */ + catenate_words?: boolean + /** If `true`, the filter includes tokens consisting of only numeric characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. */ + generate_number_parts?: boolean + /** If `true`, the filter includes tokens consisting of only alphabetical characters in the output. If `false`, the filter excludes these tokens from the output. Defaults to `true`. */ + generate_word_parts?: boolean + /** If `true`, the filter includes the original version of any split tokens in the output. This original version includes non-alphanumeric delimiters. Defaults to `false`. */ + preserve_original?: SpecUtilsStringified<boolean> + /** Array of tokens the filter won’t split. */ + protected_words?: string[] + /** Path to a file that contains a list of tokens the filter won’t split. + * This path must be absolute or relative to the `config` location, and the file must be UTF-8 encoded.
Each token in the file must be separated by a line break. */ + protected_words_path?: string + /** If `true`, the filter splits tokens at letter case transitions. For example: camelCase -> [ camel, Case ]. Defaults to `true`. */ + split_on_case_change?: boolean + /** If `true`, the filter splits tokens at letter-number transitions. For example: j2se -> [ j, 2, se ]. Defaults to `true`. */ + split_on_numerics?: boolean + /** If `true`, the filter removes the English possessive (`'s`) from the end of each token. For example: O'Neil's -> [ O, Neil ]. Defaults to `true`. */ + stem_english_possessive?: boolean + /** Array of custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */ + type_table?: string[] + /** Path to a file that contains custom type mappings for characters. This allows you to map non-alphanumeric characters as numeric or alphanumeric to avoid splitting on those characters. */ + type_table_path?: string +} + +export interface MappingAggregateMetricDoubleProperty extends MappingPropertyBase { + type: 'aggregate_metric_double' + default_metric: string + ignore_malformed?: boolean + metrics: string[] + time_series_metric?: MappingTimeSeriesMetricType +} + +export interface MappingAllField { + analyzer: string + enabled: boolean + omit_norms: boolean + search_analyzer: string + similarity: string + store: boolean + store_term_vector_offsets: boolean + store_term_vector_payloads: boolean + store_term_vector_positions: boolean + store_term_vectors: boolean +} + +export interface MappingBinaryProperty extends MappingDocValuesPropertyBase { + type: 'binary' +} + +export interface MappingBooleanProperty extends MappingDocValuesPropertyBase { + boost?: double + fielddata?: IndicesNumericFielddata + index?: boolean + null_value?: boolean + ignore_malformed?: boolean + script?: Script | ScriptSource + on_script_error?: MappingOnScriptError + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ + time_series_dimension?: boolean + type: 'boolean' +} + +export interface MappingByteNumberProperty extends MappingNumberPropertyBase { + type: 'byte' + null_value?: byte +} + +export interface MappingChunkRescorerChunkingSettings { + /** The chunking strategy: `sentence`, `word`, `none` or `recursive`. + * + * * If `strategy` is set to `recursive`, you must also specify: + * + * - `max_chunk_size` + * - either `separators` or `separator_group` + * + * Learn more about different chunking strategies in the linked documentation. */ + strategy?: string + /** Only applicable to the `recursive` strategy and required when using it. + * + * Sets a predefined list of separators in the saved chunking settings based on the selected text type. + * Values can be `markdown` or `plaintext`. + * + * Using this parameter is an alternative to manually specifying a custom `separators` list. */ + separator_group?: string + /** Only applicable to the `recursive` strategy and required when using it. + * + * A list of strings used as possible split points when chunking text. + * + * Each string can be a plain string or a regular expression (regex) pattern. + * The system tries each separator in order to split the text, starting from the first item in the list. + * + * After splitting, it attempts to recombine smaller pieces into larger chunks that stay within + * the `max_chunk_size` limit, to reduce the total number of chunks generated.
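A sketch of the word delimiter options above applied to part-number style tokens; the flag combination is illustrative, not a recommendation:

import type { AnalysisWordDelimiterGraphTokenFilter } from '@elastic/elasticsearch/lib/api/types'

// Split "PowerShot-500" style tokens on case changes and digits, keep the
// original token, and also emit the catenated word parts.
const partNumberSplitter: AnalysisWordDelimiterGraphTokenFilter = {
  type: 'word_delimiter_graph',
  preserve_original: true,
  split_on_case_change: true,
  split_on_numerics: true,
  catenate_words: true
}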
*/ + separators?: string[] + /** The maximum size of a chunk in words. + * This value cannot be lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). + * This value should not exceed the window size for the associated model. */ + max_chunk_size: integer + /** The number of overlapping words for chunks. + * It is applicable only to a `word` chunking strategy. + * This value cannot be higher than half the `max_chunk_size` value. */ + overlap?: integer + /** The number of overlapping sentences for chunks. + * It is applicable only for a `sentence` chunking strategy. + * It can be either `1` or `0`. */ + sentence_overlap?: integer +} + +export interface MappingChunkingSettings { + /** The chunking strategy: `sentence`, `word`, `none` or `recursive`. + * + * * If `strategy` is set to `recursive`, you must also specify: + * + * - `max_chunk_size` + * - either `separators` or `separator_group` + * + * Learn more about different chunking strategies in the linked documentation. */ + strategy: string + /** Only applicable to the `recursive` strategy and required when using it. + * + * Sets a predefined list of separators in the saved chunking settings based on the selected text type. + * Values can be `markdown` or `plaintext`. + * + * Using this parameter is an alternative to manually specifying a custom `separators` list. */ + separator_group?: string + /** Only applicable to the `recursive` strategy and required when using it. + * + * A list of strings used as possible split points when chunking text. + * + * Each string can be a plain string or a regular expression (regex) pattern. + * The system tries each separator in order to split the text, starting from the first item in the list. + * + * After splitting, it attempts to recombine smaller pieces into larger chunks that stay within + * the `max_chunk_size` limit, to reduce the total number of chunks generated. */ + separators?: string[] + /** The maximum size of a chunk in words. + * This value cannot be lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). + * This value should not exceed the window size for the associated model. */ + max_chunk_size: integer + /** The number of overlapping words for chunks. + * It is applicable only to a `word` chunking strategy. + * This value cannot be higher than half the `max_chunk_size` value. */ + overlap?: integer + /** The number of overlapping sentences for chunks. + * It is applicable only for a `sentence` chunking strategy. + * It can be either `1` or `0`.
*/ + sentence_overlap?: integer +} + +export interface MappingCompletionProperty extends MappingDocValuesPropertyBase { + analyzer?: string + contexts?: MappingSuggestContext[] + max_input_length?: integer + preserve_position_increments?: boolean + preserve_separators?: boolean + search_analyzer?: string + type: 'completion' +} + +export interface MappingCompositeSubField { + type: MappingRuntimeFieldType +} + +export interface MappingConstantKeywordProperty extends MappingPropertyBase { + value?: any + type: 'constant_keyword' +} + +export interface MappingCorePropertyBase extends MappingPropertyBase { + copy_to?: Fields + store?: boolean +} + +export interface MappingCountedKeywordProperty extends MappingPropertyBase { + type: 'counted_keyword' + index?: boolean +} + +export interface MappingDataStreamTimestamp { + enabled: boolean +} + +export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase { + boost?: double + format?: string + ignore_malformed?: boolean + index?: boolean + script?: Script | ScriptSource + on_script_error?: MappingOnScriptError + null_value?: DateTime + precision_step?: integer + type: 'date_nanos' +} + +export interface MappingDateProperty extends MappingDocValuesPropertyBase { + boost?: double + fielddata?: IndicesNumericFielddata + format?: string + ignore_malformed?: boolean + index?: boolean + script?: Script | ScriptSource + on_script_error?: MappingOnScriptError + null_value?: DateTime + precision_step?: integer + locale?: string + type: 'date' +} + +export interface MappingDateRangeProperty extends MappingRangePropertyBase { + format?: string + type: 'date_range' +} + +export type MappingDenseVectorElementType = 'bit' | 'byte' | 'float' + +export interface MappingDenseVectorIndexOptions { + /** The confidence interval to use when quantizing the vectors. Can be any value between and including `0.90` and + * `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic quantiles should be calculated for + * optimized quantization. When between `0.90` and `1.0`, this value restricts the values used when calculating + * the quantization thresholds. + * + * For example, a value of `0.95` will only use the middle `95%` of the values when calculating the quantization + * thresholds (e.g. the highest and lowest `2.5%` of values will be ignored). + * + * Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` for dynamic quantile calculation. + * + * Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` index types. */ + confidence_interval?: float + /** The number of candidates to track while assembling the list of nearest neighbors for each new node. + * + * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */ + ef_construction?: integer + /** The number of neighbors each node will be connected to in the HNSW graph. + * + * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */ + m?: integer + /** The type of kNN algorithm to use. */ + type: MappingDenseVectorIndexOptionsType + /** The rescore vector options. This is only applicable to `bbq_disk`, `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`, `int4_flat`, and `int8_flat` index types. 
*/ + rescore_vector?: MappingDenseVectorIndexOptionsRescoreVector + /** `true` if vector rescoring should be done on-disk + * + * Only applicable to `bbq_hnsw` */ + on_disk_rescore?: boolean +} + +export interface MappingDenseVectorIndexOptionsRescoreVector { + /** The oversampling factor to use when searching for the nearest neighbor. This is only applicable to the quantized formats: `bbq_*`, `int4_*`, and `int8_*`. + * When provided, `oversample * k` vectors will be gathered and then their scores will be re-computed with the original vectors. + * + * valid values are between `1.0` and `10.0` (inclusive), or `0` exactly to disable oversampling. */ + oversample: float +} + +export type MappingDenseVectorIndexOptionsType = 'bbq_flat' | 'bbq_hnsw' | 'bbq_disk' | 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw' + +export interface MappingDenseVectorProperty extends MappingPropertyBase { + type: 'dense_vector' + /** Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, it will be set to the length of + * the first vector added to the field. */ + dims?: integer + /** The data type used to encode vectors. The supported data types are `float` (default), `byte`, and `bit`. */ + element_type?: MappingDenseVectorElementType + /** If `true`, you can search this field using the kNN search API. */ + index?: boolean + /** An optional section that configures the kNN indexing algorithm. The HNSW algorithm has two internal parameters + * that influence how the data structure is built. These can be adjusted to improve the accuracy of results, at the + * expense of slower indexing speed. + * + * This parameter can only be specified when `index` is `true`. */ + index_options?: MappingDenseVectorIndexOptions + /** The vector similarity metric to use in kNN search. + * + * Documents are ranked by their vector field's similarity to the query vector. The `_score` of each document will + * be derived from the similarity, in a way that ensures scores are positive and that a larger score corresponds + * to a higher ranking. + * + * Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to `cosine`. + * + * `bit` vectors only support `l2_norm` as their similarity metric. + * + * This parameter can only be specified when `index` is `true`. 
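Putting the dense vector options above together, a hedged sketch of a quantized kNN field; the dimension count assumes a hypothetical 384-dimension embedding model, and the HNSW parameters are illustrative:

import type { MappingDenseVectorProperty } from '@elastic/elasticsearch/lib/api/types'

// A cosine dense_vector using int8-quantized HNSW indexing; index must be
// true for index_options and similarity to be accepted.
const embedding: MappingDenseVectorProperty = {
  type: 'dense_vector',
  dims: 384,
  index: true,
  similarity: 'cosine',
  index_options: { type: 'int8_hnsw', m: 16, ef_construction: 100 }
}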
*/ + similarity?: MappingDenseVectorSimilarity +} + +export type MappingDenseVectorSimilarity = 'cosine' | 'dot_product' | 'l2_norm' | 'max_inner_product' + +export interface MappingDocValuesPropertyBase extends MappingCorePropertyBase { + doc_values?: boolean +} + +export interface MappingDoubleNumberProperty extends MappingNumberPropertyBase { + type: 'double' + null_value?: double +} + +export interface MappingDoubleRangeProperty extends MappingRangePropertyBase { + type: 'double_range' +} + +export type MappingDynamicMapping = boolean | 'strict' | 'runtime' | 'true' | 'false' + +export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { + type: '{dynamic_type}' + enabled?: boolean + null_value?: FieldValue + boost?: double + coerce?: boolean + script?: Script | ScriptSource + on_script_error?: MappingOnScriptError + ignore_malformed?: boolean + time_series_metric?: MappingTimeSeriesMetricType + analyzer?: string + eager_global_ordinals?: boolean + index?: boolean + index_options?: MappingIndexOptions + index_phrases?: boolean + index_prefixes?: MappingTextIndexPrefixes | null + norms?: boolean + position_increment_gap?: integer + search_analyzer?: string + search_quote_analyzer?: string + term_vector?: MappingTermVectorOption + format?: string + precision_step?: integer + locale?: string +} + +export interface MappingDynamicTemplate { + mapping?: MappingProperty + runtime?: MappingRuntimeField + match?: string | string[] + path_match?: string | string[] + unmatch?: string | string[] + path_unmatch?: string | string[] + match_mapping_type?: string | string[] + unmatch_mapping_type?: string | string[] + match_pattern?: MappingMatchType +} + +export interface MappingFieldAliasProperty extends MappingPropertyBase { + path?: Field + type: 'alias' +} + +export interface MappingFieldMapping { + full_name: string + mapping: Partial<Record<Field, MappingProperty>> +} + +export interface MappingFieldNamesField { + enabled: boolean +} + +export type MappingFieldType = 'none' | 'geo_point' | 'geo_shape' | 'ip' | 'binary' | 'keyword' | 'text' | 'search_as_you_type' | 'date' | 'date_nanos' | 'boolean' | 'completion' | 'nested' | 'object' | 'passthrough' | 'version' | 'murmur3' | 'token_count' | 'percolator' | 'integer' | 'long' | 'short' | 'byte' | 'float' | 'half_float' | 'scaled_float' | 'double' | 'integer_range' | 'float_range' | 'long_range' | 'double_range' | 'date_range' | 'ip_range' | 'alias' | 'join' | 'rank_feature' | 'rank_features' | 'flattened' | 'shape' | 'histogram' | 'constant_keyword' | 'counted_keyword' | 'aggregate_metric_double' | 'dense_vector' | 'semantic_text' | 'sparse_vector' | 'match_only_text' | 'icu_collation_keyword' + +export interface MappingFlattenedProperty extends MappingPropertyBase { + boost?: double + depth_limit?: integer + doc_values?: boolean + eager_global_ordinals?: boolean + index?: boolean + index_options?: MappingIndexOptions + null_value?: string + similarity?: string + split_queries_on_whitespace?: boolean + time_series_dimensions?: string[] + type: 'flattened' +} + +export interface MappingFloatNumberProperty extends MappingNumberPropertyBase { + type: 'float' + null_value?: float +} + +export interface MappingFloatRangeProperty extends MappingRangePropertyBase { + type: 'float_range' +} + +export type MappingGeoOrientation = 'right' | 'RIGHT' | 'counterclockwise' | 'ccw' | 'left' | 'LEFT' | 'clockwise' | 'cw' + +export type MappingGeoPointMetricType = 'gauge' | 'counter' | 'position' + +export interface MappingGeoPointProperty extends
MappingDocValuesPropertyBase { + ignore_malformed?: boolean + ignore_z_value?: boolean + null_value?: GeoLocation + index?: boolean + on_script_error?: MappingOnScriptError + script?: Script | ScriptSource + type: 'geo_point' + time_series_metric?: MappingGeoPointMetricType +} + +export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { + coerce?: boolean + ignore_malformed?: boolean + ignore_z_value?: boolean + index?: boolean + orientation?: MappingGeoOrientation + strategy?: MappingGeoStrategy + type: 'geo_shape' +} + +export type MappingGeoStrategy = 'recursive' | 'term' + +export interface MappingHalfFloatNumberProperty extends MappingNumberPropertyBase { + type: 'half_float' + null_value?: float +} + +export interface MappingHistogramProperty extends MappingPropertyBase { + ignore_malformed?: boolean + type: 'histogram' +} + +export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBase { + type: 'icu_collation_keyword' + norms?: boolean + index_options?: MappingIndexOptions + /** Should the field be searchable? */ + index?: boolean + /** Accepts a string value which is substituted for any explicit null values. Defaults to null, which means the field is treated as missing. */ + null_value?: string + rules?: string + language?: string + country?: string + variant?: string + strength?: AnalysisIcuCollationStrength + decomposition?: AnalysisIcuCollationDecomposition + alternate?: AnalysisIcuCollationAlternate + case_level?: boolean + case_first?: AnalysisIcuCollationCaseFirst + numeric?: boolean + variable_top?: string + hiragana_quaternary_mode?: boolean +} + +export interface MappingIndexField { + enabled: boolean +} + +export type MappingIndexOptions = 'docs' | 'freqs' | 'positions' | 'offsets' + +export interface MappingIntegerNumberProperty extends MappingNumberPropertyBase { + type: 'integer' + null_value?: integer +} + +export interface MappingIntegerRangeProperty extends MappingRangePropertyBase { + type: 'integer_range' +} + +export interface MappingIpProperty extends MappingDocValuesPropertyBase { + boost?: double + index?: boolean + ignore_malformed?: boolean + null_value?: string + on_script_error?: MappingOnScriptError + script?: Script | ScriptSource + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ + time_series_dimension?: boolean + type: 'ip' +} + +export interface MappingIpRangeProperty extends MappingRangePropertyBase { + type: 'ip_range' +} + +export interface MappingJoinProperty extends MappingPropertyBase { + relations?: Record<RelationName, RelationName | RelationName[]> + eager_global_ordinals?: boolean + type: 'join' +} + +export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { + boost?: double + eager_global_ordinals?: boolean + index?: boolean + index_options?: MappingIndexOptions + script?: Script | ScriptSource + on_script_error?: MappingOnScriptError + normalizer?: string + norms?: boolean + null_value?: string + similarity?: string | null + split_queries_on_whitespace?: boolean + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false.
+ * @experimental */ + time_series_dimension?: boolean + type: 'keyword' +} + +export interface MappingLongNumberProperty extends MappingNumberPropertyBase { + type: 'long' + null_value?: long +} + +export interface MappingLongRangeProperty extends MappingRangePropertyBase { + type: 'long_range' +} + +export interface MappingMatchOnlyTextProperty { + type: 'match_only_text' + /** Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one + * field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. */ + fields?: Record<PropertyName, MappingProperty> + /** Metadata about the field. */ + meta?: Record<string, string> + /** Allows you to copy the values of multiple fields into a group + * field, which can then be queried as a single field. */ + copy_to?: Fields +} + +export type MappingMatchType = 'simple' | 'regex' + +export interface MappingMurmur3HashProperty extends MappingDocValuesPropertyBase { + type: 'murmur3' +} + +export interface MappingNestedProperty extends MappingCorePropertyBase { + enabled?: boolean + include_in_parent?: boolean + include_in_root?: boolean + type: 'nested' +} + +export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase { + boost?: double + coerce?: boolean + ignore_malformed?: boolean + index?: boolean + on_script_error?: MappingOnScriptError + script?: Script | ScriptSource + /** For internal use by Elastic only. Marks the field as a time series metric. Defaults to false. + * @experimental */ + time_series_metric?: MappingTimeSeriesMetricType + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ + time_series_dimension?: boolean +} + +export interface MappingObjectProperty extends MappingCorePropertyBase { + enabled?: boolean + subobjects?: MappingSubobjects + type?: 'object' +} + +export type MappingOnScriptError = 'fail' | 'continue' + +export interface MappingPassthroughObjectProperty extends MappingCorePropertyBase { + type?: 'passthrough' + enabled?: boolean + priority?: integer + time_series_dimension?: boolean +} + +export interface MappingPercolatorProperty extends MappingPropertyBase { + type: 'percolator' +} + +export interface MappingPointProperty extends MappingDocValuesPropertyBase { + ignore_malformed?: boolean + ignore_z_value?: boolean + null_value?: string + type: 'point' +} + +export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingRankVectorProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty |
MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty + +export interface MappingPropertyBase { + /** Metadata about the field. */ + meta?: Record<string, string> + properties?: Record<PropertyName, MappingProperty> + ignore_above?: integer + dynamic?: MappingDynamicMapping + fields?: Record<PropertyName, MappingProperty> + synthetic_source_keep?: MappingSyntheticSourceKeepEnum +} + +export interface MappingRangePropertyBase extends MappingDocValuesPropertyBase { + boost?: double + coerce?: boolean + index?: boolean +} + +export interface MappingRankFeatureProperty extends MappingPropertyBase { + positive_score_impact?: boolean + type: 'rank_feature' +} + +export interface MappingRankFeaturesProperty extends MappingPropertyBase { + positive_score_impact?: boolean + type: 'rank_features' +} + +export type MappingRankVectorElementType = 'byte' | 'float' | 'bit' + +export interface MappingRankVectorProperty extends MappingPropertyBase { + type: 'rank_vectors' + element_type?: MappingRankVectorElementType + dims?: integer +} + +export interface MappingRoutingField { + required: boolean +} + +export interface MappingRuntimeField { + /** For type `composite` */ + fields?: Record<string, MappingCompositeSubField> + /** For type `lookup` */ + fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[] + /** A custom format for `date` type runtime fields. */ + format?: string + /** For type `lookup` */ + input_field?: Field + /** For type `lookup` */ + target_field?: Field + /** For type `lookup` */ + target_index?: IndexName + /** Painless script executed at query time. */ + script?: Script | ScriptSource + /** Field type, which can be: `boolean`, `composite`, `date`, `double`, `geo_point`, `ip`, `keyword`, `long`, or `lookup`. */ + type: MappingRuntimeFieldType +} + +export interface MappingRuntimeFieldFetchFields { + field: Field + format?: string +} + +export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'geo_shape' | 'ip' | 'keyword' | 'long' | 'lookup' + +export type MappingRuntimeFields = Record<Field, MappingRuntimeField> + +export interface MappingScaledFloatNumberProperty extends MappingNumberPropertyBase { + type: 'scaled_float' + null_value?: double + scaling_factor?: double +} + +export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase { + analyzer?: string + index?: boolean + index_options?: MappingIndexOptions + max_shingle_size?: integer + norms?: boolean + search_analyzer?: string + search_quote_analyzer?: string + similarity?: string | null + term_vector?: MappingTermVectorOption + type: 'search_as_you_type' +} + +export interface MappingSemanticTextIndexOptions { + dense_vector?: MappingDenseVectorIndexOptions + sparse_vector?: MappingSparseVectorIndexOptions +} + +export interface MappingSemanticTextProperty { + type: 'semantic_text' + meta?: Record<string, string> + /** Inference endpoint that will be used to generate embeddings for the field. + * This parameter cannot be updated. Use the Create inference API to create the endpoint. + * If `search_inference_id` is specified, the inference endpoint will only be used at index time. */ + inference_id?: Id + /** Inference endpoint that will be used to generate embeddings at query time. + * You can update this parameter by using the Update mapping API.
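A sketch of the runtime field shape above: a `keyword` runtime field computed by a Painless script. The `@timestamp` source field is an assumption about the target index:

import type { MappingRuntimeField } from '@elastic/elasticsearch/lib/api/types'

// Derives a day-of-week keyword from a date field at query time.
const dayOfWeek: MappingRuntimeField = {
  type: 'keyword',
  script: {
    source: "emit(doc['@timestamp'].value.dayOfWeekEnum.toString())"
  }
}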
Use the Create inference API to create the endpoint. + * If not specified, the inference endpoint defined by inference_id will be used at both index and query time. */ + search_inference_id?: Id + /** Settings for index_options that override any defaults used by semantic_text, for example + * specific quantization settings. */ + index_options?: MappingSemanticTextIndexOptions + /** Settings for chunking text into smaller passages. If specified, these will override the + * chunking settings sent in the inference endpoint associated with inference_id. If chunking settings are updated, + * they will not be applied to existing documents until they are reindexed. */ + chunking_settings?: MappingChunkingSettings | null + /** Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one + * field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. */ + fields?: Record<PropertyName, MappingProperty> +} + +export interface MappingShapeProperty extends MappingDocValuesPropertyBase { + coerce?: boolean + ignore_malformed?: boolean + ignore_z_value?: boolean + orientation?: MappingGeoOrientation + type: 'shape' +} + +export interface MappingShortNumberProperty extends MappingNumberPropertyBase { + type: 'short' + null_value?: short +} + +export interface MappingSizeField { + enabled: boolean +} + +export interface MappingSourceField { + compress?: boolean + compress_threshold?: string + enabled?: boolean + excludes?: string[] + includes?: string[] + mode?: MappingSourceFieldMode +} + +export type MappingSourceFieldMode = 'disabled' | 'stored' | 'synthetic' + +export interface MappingSparseVectorIndexOptions { + /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. + * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. + * Default: false */ + prune?: boolean + /** Optional pruning configuration. + * If enabled, this will omit non-significant tokens from the query in order to improve query performance. + * This is only used if prune is set to true. + * If prune is set to true but pruning_config is not specified, default values will be used. */ + pruning_config?: TokenPruningConfig +} + +export interface MappingSparseVectorProperty extends MappingPropertyBase { + store?: boolean + type: 'sparse_vector' + /** Additional index options for the sparse vector field that controls the + * token pruning behavior of the sparse vector field.
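Tying the semantic_text and chunking shapes together, a hedged sketch where `my-elser-endpoint` is a placeholder inference endpoint name:

import type { MappingSemanticTextProperty } from '@elastic/elasticsearch/lib/api/types'

// Sentence chunking here overrides the endpoint's own chunking defaults;
// strategy and max_chunk_size are the required chunking fields.
const body: MappingSemanticTextProperty = {
  type: 'semantic_text',
  inference_id: 'my-elser-endpoint',
  chunking_settings: {
    strategy: 'sentence',
    max_chunk_size: 250,
    sentence_overlap: 1
  }
}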
*/ + index_options?: MappingSparseVectorIndexOptions +} + +export type MappingSubobjects = boolean | 'true' | 'false' | 'auto' + +export interface MappingSuggestContext { + name: Name + path?: Field + type: string + precision?: integer | string +} + +export type MappingSyntheticSourceKeepEnum = 'none' | 'arrays' | 'all' + +export type MappingTermVectorOption = 'no' | 'yes' | 'with_offsets' | 'with_positions' | 'with_positions_offsets' | 'with_positions_offsets_payloads' | 'with_positions_payloads' + +export interface MappingTextIndexPrefixes { + max_chars: integer + min_chars: integer +} + +export interface MappingTextProperty extends MappingCorePropertyBase { + analyzer?: string + boost?: double + eager_global_ordinals?: boolean + fielddata?: boolean + fielddata_frequency_filter?: IndicesFielddataFrequencyFilter + index?: boolean + index_options?: MappingIndexOptions + index_phrases?: boolean + index_prefixes?: MappingTextIndexPrefixes | null + norms?: boolean + position_increment_gap?: integer + search_analyzer?: string + search_quote_analyzer?: string + similarity?: string | null + term_vector?: MappingTermVectorOption + type: 'text' +} + +export type MappingTimeSeriesMetricType = 'gauge' | 'counter' | 'summary' | 'histogram' | 'position' + +export interface MappingTokenCountProperty extends MappingDocValuesPropertyBase { + analyzer?: string + boost?: double + index?: boolean + null_value?: double + enable_position_increments?: boolean + type: 'token_count' +} + +export interface MappingTypeMapping { + all_field?: MappingAllField + date_detection?: boolean + dynamic?: MappingDynamicMapping + dynamic_date_formats?: string[] + dynamic_templates?: Partial>[] + _field_names?: MappingFieldNamesField + index_field?: MappingIndexField + _meta?: Metadata + numeric_detection?: boolean + properties?: Record + _routing?: MappingRoutingField + _size?: MappingSizeField + _source?: MappingSourceField + runtime?: Record + enabled?: boolean + subobjects?: MappingSubobjects + _data_stream_timestamp?: MappingDataStreamTimestamp +} + +export interface MappingUnsignedLongNumberProperty extends MappingNumberPropertyBase { + type: 'unsigned_long' + null_value?: ulong +} + +export interface MappingVersionProperty extends MappingDocValuesPropertyBase { + type: 'version' +} + +export interface MappingWildcardProperty extends MappingDocValuesPropertyBase { + type: 'wildcard' + null_value?: string +} + +export interface QueryDslBoolQuery extends QueryDslQueryBase { + /** The clause (query) must appear in matching documents. + * However, unlike `must`, the score of the query will be ignored. */ + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** Specifies the number or percentage of `should` clauses returned documents must match. */ + minimum_should_match?: MinimumShouldMatch + /** The clause (query) must appear in matching documents and will contribute to the score. */ + must?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The clause (query) must not appear in the matching documents. + * Because scoring is ignored, a score of `0` is returned for all documents. */ + must_not?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The clause (query) should appear in the matching document. */ + should?: QueryDslQueryContainer | QueryDslQueryContainer[] +} + +export interface QueryDslBoostingQuery extends QueryDslQueryBase { + /** Floating point number between 0 and 1.0 used to decrease the relevance scores of documents matching the `negative` query. 
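// Illustrative sketch, not part of the generated type output: how the clauses
// of QueryDslBoolQuery compose. Field names and values are hypothetical;
// `filter` clauses skip scoring, and at least `minimum_should_match` of the
// `should` clauses must match.
import type { QueryDslBoolQuery } from '@elastic/elasticsearch/lib/api/types'

const boolQuery: QueryDslBoolQuery = {
  filter: [{ term: { status: 'published' } }],
  should: [
    { match: { title: { query: 'quick brown fox' } } },
    { match: { body: { query: 'quick brown fox' } } }
  ],
  minimum_should_match: 1
}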
*/ + negative_boost: double + /** Query used to decrease the relevance score of matching documents. */ + negative: QueryDslQueryContainer + /** Any returned documents must match this query. */ + positive: QueryDslQueryContainer +} + +export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min' + +export type QueryDslCombinedFieldsOperator = 'or' | 'and' + +export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase { + /** List of fields to search. Field wildcard patterns are allowed. Only `text` fields are supported, and they must all have the same search `analyzer`. */ + fields: Field[] + /** Text to search for in the provided `fields`. + * The `combined_fields` query analyzes the provided text before performing a search. */ + query: string + /** If true, match phrase queries are automatically created for multi-term synonyms. */ + auto_generate_synonyms_phrase_query?: boolean + /** Boolean logic used to interpret text in the query value. */ + operator?: QueryDslCombinedFieldsOperator + /** Minimum number of clauses that must match for a document to be returned. */ + minimum_should_match?: MinimumShouldMatch + /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ + zero_terms_query?: QueryDslCombinedFieldsZeroTerms +} + +export type QueryDslCombinedFieldsZeroTerms = 'none' | 'all' + +export interface QueryDslCommonTermsQuery extends QueryDslQueryBase { + analyzer?: string + cutoff_frequency?: double + high_freq_operator?: QueryDslOperator + low_freq_operator?: QueryDslOperator + minimum_should_match?: MinimumShouldMatch + query: string +} + +export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { + /** Filter query you wish to run. Any returned documents must match this query. + * Filter queries do not calculate relevance scores. + * To speed up performance, Elasticsearch automatically caches frequently used filter queries. */ + filter: QueryDslQueryContainer +} + +export interface QueryDslDateDecayFunctionKeys extends QueryDslDecayFunctionBase { +} +export type QueryDslDateDecayFunction = QueryDslDateDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } + +export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +} + +export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { + /** Date format used to convert `date` values in the query. */ + format?: DateFormat + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ + time_zone?: TimeZone +} + +export type QueryDslDecayFunction = QueryDslUntypedDecayFunction | QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction + +export interface QueryDslDecayFunctionBase { + /** Determines how the distance is calculated when a field used for computing the decay contains multiple values. */ + multi_value_mode?: QueryDslMultiValueMode +} + +export interface QueryDslDecayPlacement { + /** Defines how documents are scored at the distance given at scale. */ + decay?: double + /** If defined, the decay function will only compute the decay function for documents with a distance greater than the defined `offset`. */ + offset?: TScale + /** Defines the distance from origin + offset at which the computed score will equal `decay` parameter. */ + scale?: TScale + /** The point of origin used for calculating distance. 
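// Illustrative sketch, not part of the generated type output: a
// QueryDslDateRangeQuery using date math; bounds and time zone are
// hypothetical. `format` and `time_zone` apply only to date-valued fields.
import type { QueryDslDateRangeQuery } from '@elastic/elasticsearch/lib/api/types'

const lastWeek: QueryDslDateRangeQuery = {
  gte: 'now-7d/d',
  lt: 'now/d',
  time_zone: '+01:00'
}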
Must be given as a number for numeric fields, a date for date fields, and a geo point for geo fields. */ + origin?: TOrigin +} + +export interface QueryDslDisMaxQuery extends QueryDslQueryBase { + /** One or more query clauses. + * Returned documents must match one or more of these queries. + * If a document matches multiple queries, Elasticsearch uses the highest relevance score. */ + queries: QueryDslQueryContainer[] + /** Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses. */ + tie_breaker?: double +} + +export type QueryDslDistanceFeatureQuery = QueryDslUntypedDistanceFeatureQuery | QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery + +export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase { + /** Date or point of origin used to calculate distances. + * If the `field` value is a `date` or `date_nanos` field, the `origin` value must be a date. + * Date Math, such as `now-1h`, is supported. + * If the field value is a `geo_point` field, the `origin` value must be a geopoint. */ + origin: TOrigin + /** Distance from the `origin` at which relevance scores receive half of the `boost` value. + * If the `field` value is a `date` or `date_nanos` field, the `pivot` value must be a time unit, such as `1h` or `10d`. If the `field` value is a `geo_point` field, the `pivot` value must be a distance unit, such as `1km` or `12m`. */ + pivot: TDistance + /** Name of the field used to calculate distances. This field must meet the following criteria: + * be a `date`, `date_nanos` or `geo_point` field; + * have an `index` mapping parameter value of `true`, which is the default; + * have a `doc_values` mapping parameter value of `true`, which is the default. */ + field: Field +} + +export interface QueryDslExistsQuery extends QueryDslQueryBase { + /** Name of the field you wish to search. */ + field: Field +} + +export interface QueryDslFieldAndFormat { + /** A wildcard pattern. The request returns values for field names matching this pattern. */ + field: Field + /** The format in which the values are returned. */ + format?: string + include_unmapped?: boolean +} + +export interface QueryDslFieldLookup { + /** `id` of the document. */ + id: Id + /** Index from which to retrieve the document. */ + index?: IndexName + /** Name of the field. */ + path?: Field + /** Custom routing value. */ + routing?: Routing +} + +export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal' + +export interface QueryDslFieldValueFactorScoreFunction { + /** Field to be extracted from the document. */ + field: Field + /** Optional factor to multiply the field value with. */ + factor?: double + /** Value used if the document doesn’t have that field. + * The modifier and factor are still applied to it as though it were read from the document. */ + missing?: double + /** Modifier to apply to the field value. */ + modifier?: QueryDslFieldValueFactorModifier +} + +export type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min' + +export interface QueryDslFunctionScoreContainer { + /** Function that scores a document with an exponential decay, depending on the distance of a numeric field value of the document from an origin. */ + exp?: QueryDslDecayFunction + /** Function that scores a document with a normal decay, depending on the distance of a numeric field value of the document from an origin.
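// Illustrative sketch, not part of the generated type output: a date-based
// distance_feature query as it would sit inside a query container. Scores
// decay as `production_date` (hypothetical field) gets further than `pivot`
// from `origin`.
import type { QueryDslQueryContainer } from '@elastic/elasticsearch/lib/api/types'

const fresher: QueryDslQueryContainer = {
  distance_feature: {
    field: 'production_date',
    origin: 'now',
    pivot: '7d'
  }
}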
*/ + gauss?: QueryDslDecayFunction + /** Function that scores a document with a linear decay, depending on the distance of a numeric field value of the document from an origin. */ + linear?: QueryDslDecayFunction + /** Function allows you to use a field from a document to influence the score. + * It’s similar to using the script_score function, however, it avoids the overhead of scripting. */ + field_value_factor?: QueryDslFieldValueFactorScoreFunction + /** Generates scores that are uniformly distributed from 0 up to but not including 1. + * In case you want scores to be reproducible, it is possible to provide a `seed` and `field`. */ + random_score?: QueryDslRandomScoreFunction + /** Enables you to wrap another query and customize the scoring of it optionally with a computation derived from other numeric field values in the doc using a script expression. */ + script_score?: QueryDslScriptScoreFunction + filter?: QueryDslQueryContainer + weight?: double +} + +export type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min' + +export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase { + /** Defines how the newly computed score is combined with the score of the query. */ + boost_mode?: QueryDslFunctionBoostMode + /** One or more functions that compute a new score for each document returned by the query. */ + functions?: QueryDslFunctionScoreContainer[] + /** Restricts the new score to not exceed the provided limit. */ + max_boost?: double + /** Excludes documents that do not meet the provided score threshold. */ + min_score?: double + /** A query that determines the documents for which a new score is computed. */ + query?: QueryDslQueryContainer + /** Specifies how the computed scores are combined. */ + score_mode?: QueryDslFunctionScoreMode +} + +export interface QueryDslFuzzyQuery extends QueryDslQueryBase { + /** Maximum number of variations created. */ + max_expansions?: integer + /** Number of beginning characters left unchanged when creating expansions. */ + prefix_length?: integer + /** Method used to rewrite the query. */ + rewrite?: MultiTermQueryRewrite + /** Indicates whether edits include transpositions of two adjacent characters (for example `ab` to `ba`). */ + transpositions?: boolean + /** Maximum edit distance allowed for matching. */ + fuzziness?: Fuzziness + /** Term you wish to find in the provided field. */ + value: string | double | boolean +} + +export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { + type?: QueryDslGeoExecution + /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. + * Set to `COERCE` to also try to infer correct latitude or longitude. */ + validation_method?: QueryDslGeoValidationMethod + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped.
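// Illustrative sketch, not part of the generated type output: a
// QueryDslFunctionScoreQuery combining a weighted filter with a
// field_value_factor function; all field names are hypothetical.
import type { QueryDslFunctionScoreQuery } from '@elastic/elasticsearch/lib/api/types'

const scored: QueryDslFunctionScoreQuery = {
  query: { match: { title: { query: 'elasticsearch' } } },
  functions: [
    { filter: { term: { category: 'featured' } }, weight: 2 },
    { field_value_factor: { field: 'likes', factor: 1.2, modifier: 'sqrt', missing: 1 } }
  ],
  score_mode: 'sum',      // how the individual function results are combined
  boost_mode: 'multiply'  // how that result combines with the query score
}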
*/ + ignore_unmapped?: boolean +} +export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys +& { [property: string]: GeoBounds | QueryDslGeoExecution | QueryDslGeoValidationMethod | boolean | float | string } + +export interface QueryDslGeoDecayFunctionKeys extends QueryDslDecayFunctionBase { +} +export type QueryDslGeoDecayFunction = QueryDslGeoDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } + +export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +} + +export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { + /** The radius of the circle centred on the specified location. + * Points which fall into this circle are considered to be matches. */ + distance: Distance + /** How to compute the distance. + * Set to `plane` for a faster calculation that's inaccurate on long distances and close to the poles. */ + distance_type?: GeoDistanceType + /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. + * Set to `COERCE` to also try to infer correct latitude or longitude. */ + validation_method?: QueryDslGeoValidationMethod + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped. */ + ignore_unmapped?: boolean +} +export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys +& { [property: string]: GeoLocation | Distance | GeoDistanceType | QueryDslGeoValidationMethod | boolean | float | string } + +export type QueryDslGeoExecution = 'memory' | 'indexed' + +export interface QueryDslGeoGridQuery extends QueryDslQueryBase { + geotile?: GeoTile + geohash?: GeoHash + geohex?: GeoHexCell +} + +export interface QueryDslGeoPolygonPoints { + points: GeoLocation[] +} + +export interface QueryDslGeoPolygonQueryKeys extends QueryDslQueryBase { + validation_method?: QueryDslGeoValidationMethod + ignore_unmapped?: boolean +} +export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys +& { [property: string]: QueryDslGeoPolygonPoints | QueryDslGeoValidationMethod | boolean | float | string } + +export interface QueryDslGeoShapeFieldQuery { + shape?: GeoShape + /** Query using an indexed shape retrieved from the specified document and path. */ + indexed_shape?: QueryDslFieldLookup + /** Spatial relation operator used to search a geo field. */ + relation?: GeoShapeRelation +} + +export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped. */ + ignore_unmapped?: boolean +} +export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys +& { [property: string]: QueryDslGeoShapeFieldQuery | boolean | float | string } + +export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' + +export interface QueryDslHasChildQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ + ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ + inner_hits?: SearchInnerHits + /** Maximum number of child documents that match the query allowed for a returned parent document. + * If the parent document exceeds this limit, it is excluded from the search results.
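// Illustrative sketch, not part of the generated type output:
// QueryDslGeoDistanceQuery models the searched geo field via its index
// signature, so the hypothetical field name `pin.location` appears as an
// extra key next to `distance`.
import type { QueryDslGeoDistanceQuery } from '@elastic/elasticsearch/lib/api/types'

const nearby: QueryDslGeoDistanceQuery = {
  distance: '200km',
  'pin.location': { lat: 40.0, lon: -70.0 }
}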
*/ + max_children?: integer + /** Minimum number of child documents that match the query required to match the query for a returned parent document. + * If the parent document does not meet this limit, it is excluded from the search results. */ + min_children?: integer + /** Query you wish to run on child documents of the `type` field. + * If a child document matches the search, the query returns the parent document. */ + query: QueryDslQueryContainer + /** Indicates how scores for matching child documents affect the root parent document’s relevance score. */ + score_mode?: QueryDslChildScoreMode + /** Name of the child relationship mapped for the `join` field. */ + type: RelationName +} + +export interface QueryDslHasParentQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error. + * You can use this parameter to query multiple indices that may not contain the `parent_type`. */ + ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ + inner_hits?: SearchInnerHits + /** Name of the parent relationship mapped for the `join` field. */ + parent_type: RelationName + /** Query you wish to run on parent documents of the `parent_type` field. + * If a parent document matches the search, the query returns its child documents. */ + query: QueryDslQueryContainer + /** Indicates whether the relevance score of a matching parent document is aggregated into its child documents. */ + score?: boolean +} + +export interface QueryDslIdsQuery extends QueryDslQueryBase { + /** An array of document IDs. */ + values?: Ids +} + +export interface QueryDslIntervalsAllOf { + /** An array of rules to combine. All rules must produce a match in a document for the overall source to match. */ + intervals: QueryDslIntervalsContainer[] + /** Maximum number of positions between the matching terms. + * Intervals produced by the rules further apart than this are not considered matches. */ + max_gaps?: integer + /** If `true`, intervals produced by the rules should appear in the order in which they are specified. */ + ordered?: boolean + /** Rule used to filter returned intervals. */ + filter?: QueryDslIntervalsFilter +} + +export interface QueryDslIntervalsAnyOf { + /** An array of rules to match. */ + intervals: QueryDslIntervalsContainer[] + /** Rule used to filter returned intervals. */ + filter?: QueryDslIntervalsFilter +} + +export interface QueryDslIntervalsContainer { + /** Returns matches that span a combination of other rules. */ + all_of?: QueryDslIntervalsAllOf + /** Returns intervals produced by any of its sub-rules. */ + any_of?: QueryDslIntervalsAnyOf + /** Matches analyzed text. */ + fuzzy?: QueryDslIntervalsFuzzy + /** Matches analyzed text. */ + match?: QueryDslIntervalsMatch + /** Matches terms that start with a specified set of characters. */ + prefix?: QueryDslIntervalsPrefix + range?: QueryDslIntervalsRange + regexp?: QueryDslIntervalsRegexp + /** Matches terms using a wildcard pattern. */ + wildcard?: QueryDslIntervalsWildcard +} + +export interface QueryDslIntervalsFilter { + /** Query used to return intervals that follow an interval from the `filter` rule. */ + after?: QueryDslIntervalsContainer + /** Query used to return intervals that occur before an interval from the `filter` rule. */ + before?: QueryDslIntervalsContainer + /** Query used to return intervals contained by an interval from the `filter` rule. 
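// Illustrative sketch, not part of the generated type output: a
// QueryDslHasChildQuery returning parents whose `comment` children
// (hypothetical join relation) match, keeping the best child score and the
// top three matching children as inner hits.
import type { QueryDslHasChildQuery } from '@elastic/elasticsearch/lib/api/types'

const parentsWithComments: QueryDslHasChildQuery = {
  type: 'comment',
  query: { match: { text: { query: 'great article' } } },
  score_mode: 'max',
  inner_hits: { size: 3 }
}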
*/ + contained_by?: QueryDslIntervalsContainer + /** Query used to return intervals that contain an interval from the `filter` rule. */ + containing?: QueryDslIntervalsContainer + /** Query used to return intervals that are **not** contained by an interval from the `filter` rule. */ + not_contained_by?: QueryDslIntervalsContainer + /** Query used to return intervals that do **not** contain an interval from the `filter` rule. */ + not_containing?: QueryDslIntervalsContainer + /** Query used to return intervals that do **not** overlap with an interval from the `filter` rule. */ + not_overlapping?: QueryDslIntervalsContainer + /** Query used to return intervals that overlap with an interval from the `filter` rule. */ + overlapping?: QueryDslIntervalsContainer + /** Script used to return matching documents. + * This script must return a boolean value: `true` or `false`. */ + script?: Script | ScriptSource +} + +export interface QueryDslIntervalsFuzzy { + /** Analyzer used to normalize the term. */ + analyzer?: string + /** Maximum edit distance allowed for matching. */ + fuzziness?: Fuzziness + /** Number of beginning characters left unchanged when creating expansions. */ + prefix_length?: integer + /** The term to match. */ + term: string + /** Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). */ + transpositions?: boolean + /** If specified, match intervals from this field rather than the top-level field. + * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: Field +} + +export interface QueryDslIntervalsMatch { + /** Analyzer used to analyze terms in the query. */ + analyzer?: string + /** Maximum number of positions between the matching terms. + * Terms further apart than this are not considered matches. */ + max_gaps?: integer + /** If `true`, matching terms must appear in their specified order. */ + ordered?: boolean + /** Text you wish to find in the provided field. */ + query: string + /** If specified, match intervals from this field rather than the top-level field. + * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: Field + /** An optional interval filter. */ + filter?: QueryDslIntervalsFilter +} + +export interface QueryDslIntervalsPrefix { + /** Analyzer used to analyze the `prefix`. */ + analyzer?: string + /** Beginning characters of terms you wish to find in the top-level field. */ + prefix: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: Field +} + +export interface QueryDslIntervalsQuery extends QueryDslQueryBase { + /** Returns matches that span a combination of other rules. */ + all_of?: QueryDslIntervalsAllOf + /** Returns intervals produced by any of its sub-rules. */ + any_of?: QueryDslIntervalsAnyOf + /** Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */ + fuzzy?: QueryDslIntervalsFuzzy + /** Matches analyzed text. */ + match?: QueryDslIntervalsMatch + /** Matches terms that start with a specified set of characters. */ + prefix?: QueryDslIntervalsPrefix + range?: QueryDslIntervalsRange + regexp?: QueryDslIntervalsRegexp + /** Matches terms using a wildcard pattern. 
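// Illustrative sketch, not part of the generated type output: an intervals
// query on a hypothetical `my_text` field. Both match rules must occur in
// order, with at most one position between them.
import type { QueryDslQueryContainer } from '@elastic/elasticsearch/lib/api/types'

const orderedIntervals: QueryDslQueryContainer = {
  intervals: {
    my_text: {
      all_of: {
        ordered: true,
        max_gaps: 1,
        intervals: [
          { match: { query: 'my favorite' } },
          { match: { query: 'food' } }
        ]
      }
    }
  }
}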
*/ + wildcard?: QueryDslIntervalsWildcard +} + +export interface QueryDslIntervalsRange { + /** Analyzer used to analyze the `prefix`. */ + analyzer?: string + /** Lower term, either gte or gt must be provided. */ + gte?: string + /** Lower term, either gte or gt must be provided. */ + gt?: string + /** Upper term, either lte or lt must be provided. */ + lte?: string + /** Upper term, either lte or lt must be provided. */ + lt?: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: Field +} + +export interface QueryDslIntervalsRegexp { + /** Analyzer used to analyze the `prefix`. */ + analyzer?: string + /** Regex pattern. */ + pattern: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: Field +} + +export interface QueryDslIntervalsWildcard { + /** Analyzer used to analyze the `pattern`. + * Defaults to the top-level field's analyzer. */ + analyzer?: string + /** Wildcard pattern used to find matching terms. */ + pattern: string + /** If specified, match intervals from this field rather than the top-level field. + * The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ + use_field?: Field +} + +export type QueryDslLike = string | QueryDslLikeDocument + +export interface QueryDslLikeDocument { + /** A document not present in the index. */ + doc?: any + fields?: Field[] + /** ID of a document. */ + _id?: Id + /** Index of a document. */ + _index?: IndexName + /** Overrides the default analyzer. */ + per_field_analyzer?: Record + routing?: Routing + version?: VersionNumber + version_type?: VersionType +} + +export interface QueryDslMatchAllQuery extends QueryDslQueryBase { +} + +export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ + analyzer?: string + /** Maximum edit distance allowed for matching. + * Can be applied to the term subqueries constructed for all terms but the final term. */ + fuzziness?: Fuzziness + /** Method used to rewrite the query. + * Can be applied to the term subqueries constructed for all terms but the final term. */ + fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + * Can be applied to the term subqueries constructed for all terms but the final term. */ + fuzzy_transpositions?: boolean + /** Maximum number of terms to which the query will expand. + * Can be applied to the term subqueries constructed for all terms but the final term. */ + max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. + * Applied to the constructed bool query. */ + minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. + * Applied to the constructed bool query. */ + operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. + * Can be applied to the term subqueries constructed for all terms but the final term. */ + prefix_length?: integer + /** Terms you wish to find in the provided field. + * The last term is used in a prefix query. 
*/ + query: string +} + +export interface QueryDslMatchNoneQuery extends QueryDslQueryBase { +} + +export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase { + /** Analyzer used to convert text in the query value into tokens. */ + analyzer?: string + /** Maximum number of terms to which the last provided term of the query value will expand. */ + max_expansions?: integer + /** Text you wish to find in the provided field. */ + query: string + /** Maximum number of positions allowed between matching tokens. */ + slop?: integer + /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ + zero_terms_query?: QueryDslZeroTermsQuery +} + +export interface QueryDslMatchPhraseQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ + analyzer?: string + /** Query terms that are analyzed and turned into a phrase query. */ + query: string + /** Maximum number of positions allowed between matching tokens. */ + slop?: integer + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ + zero_terms_query?: QueryDslZeroTermsQuery +} + +export interface QueryDslMatchQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ + analyzer?: string + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ + auto_generate_synonyms_phrase_query?: boolean + cutoff_frequency?: double + /** Maximum edit distance allowed for matching. */ + fuzziness?: Fuzziness + /** Method used to rewrite the query. */ + fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ + fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ + lenient?: boolean + /** Maximum number of terms to which the query will expand. */ + max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. */ + minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. */ + operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. */ + prefix_length?: integer + /** Text, number, boolean value or date you wish to find in the provided field. */ + query: string | float | boolean + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ + zero_terms_query?: QueryDslZeroTermsQuery +} + +export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase { + /** The analyzer that is used to analyze the free form text. + * Defaults to the analyzer associated with the first field in fields. */ + analyzer?: string + /** Each term in the formed query could be further boosted by their tf-idf score. + * This sets the boost factor to use when using this feature. + * Defaults to deactivated (0). */ + boost_terms?: double + /** Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`). */ + fail_on_unsupported_field?: boolean + /** A list of fields to fetch and analyze the text from. + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. 
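// Illustrative sketch, not part of the generated type output: a
// QueryDslMatchQuery with fuzzy matching; text and field are hypothetical.
// As a container entry it would appear as `{ match: { message: matchQuery } }`.
import type { QueryDslMatchQuery } from '@elastic/elasticsearch/lib/api/types'

const matchQuery: QueryDslMatchQuery = {
  query: 'this is a test',
  operator: 'and',
  fuzziness: 'AUTO'
}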
*/ + fields?: Field[] + /** Specifies whether the input documents should also be included in the search results returned. */ + include?: boolean + /** Specifies free form text and/or a single or multiple documents for which you want to find similar documents. */ + like: QueryDslLike | QueryDslLike[] + /** The maximum document frequency above which the terms are ignored from the input document. */ + max_doc_freq?: integer + /** The maximum number of query terms that can be selected. */ + max_query_terms?: integer + /** The maximum word length above which the terms are ignored. + * Defaults to unbounded (`0`). */ + max_word_length?: integer + /** The minimum document frequency below which the terms are ignored from the input document. */ + min_doc_freq?: integer + /** After the disjunctive query has been formed, this parameter controls the number of terms that must match. */ + minimum_should_match?: MinimumShouldMatch + /** The minimum term frequency below which the terms are ignored from the input document. */ + min_term_freq?: integer + /** The minimum word length below which the terms are ignored. */ + min_word_length?: integer + routing?: Routing + /** An array of stop words. + * Any word in this set is ignored. */ + stop_words?: AnalysisStopWords + /** Used in combination with `like` to exclude documents that match a set of terms. */ + unlike?: QueryDslLike | QueryDslLike[] + version?: VersionNumber + version_type?: VersionType +} + +export interface QueryDslMultiMatchQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ + analyzer?: string + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ + auto_generate_synonyms_phrase_query?: boolean + cutoff_frequency?: double + /** The fields to be queried. + * Defaults to the `index.query.default_field` index setting, which in turn defaults to `*`. */ + fields?: Fields + /** Maximum edit distance allowed for matching. */ + fuzziness?: Fuzziness + /** Method used to rewrite the query. */ + fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + * Can be applied to the term subqueries constructed for all terms but the final term. */ + fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ + lenient?: boolean + /** Maximum number of terms to which the query will expand. */ + max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. */ + minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. */ + operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. */ + prefix_length?: integer + /** Text, number, boolean value or date you wish to find in the provided field. */ + query: string + /** Maximum number of positions allowed between matching tokens. */ + slop?: integer + /** Determines how scores for each per-term blended query and scores across groups are combined. */ + tie_breaker?: double + /** How the `multi_match` query is executed internally. */ + type?: QueryDslTextQueryType + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter.
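// Illustrative sketch, not part of the generated type output: a
// QueryDslMultiMatchQuery over two hypothetical fields. The caret suffix
// boosts matches on `title`, and `tie_breaker` folds in scores from the
// non-best fields.
import type { QueryDslMultiMatchQuery } from '@elastic/elasticsearch/lib/api/types'

const multi: QueryDslMultiMatchQuery = {
  query: 'quick brown fox',
  fields: ['title^3', 'body'],
  type: 'best_fields',
  tie_breaker: 0.3
}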
*/ + zero_terms_query?: QueryDslZeroTermsQuery +} + +export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum' + +export interface QueryDslNestedQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped path and not return any documents instead of an error. */ + ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ + inner_hits?: SearchInnerHits + /** Path to the nested object you wish to search. */ + path: Field + /** Query you wish to run on nested objects in the path. */ + query: QueryDslQueryContainer + /** How scores for matching child objects affect the root parent document’s relevance score. */ + score_mode?: QueryDslChildScoreMode +} + +export interface QueryDslNumberRangeQuery extends QueryDslRangeQueryBase { +} + +export interface QueryDslNumericDecayFunctionKeys extends QueryDslDecayFunctionBase { +} +export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } + +export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR' + +export interface QueryDslParentIdQuery extends QueryDslQueryBase { + /** ID of the parent document. */ + id?: Id + /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ + ignore_unmapped?: boolean + /** Name of the child relationship mapped for the `join` field. */ + type?: RelationName +} + +export interface QueryDslPercolateQuery extends QueryDslQueryBase { + /** The source of the document being percolated. */ + document?: any + /** An array of sources of the documents being percolated. */ + documents?: any[] + /** Field that holds the indexed queries. The field must use the `percolator` mapping type. */ + field: Field + /** The ID of a stored document to percolate. */ + id?: Id + /** The index of a stored document to percolate. */ + index?: IndexName + /** The suffix used for the `_percolator_document_slot` field when multiple `percolate` queries are specified. */ + name?: string + /** Preference used to fetch document to percolate. */ + preference?: string + /** Routing used to fetch document to percolate. */ + routing?: Routing + /** The expected version of a stored document to percolate. */ + version?: VersionNumber +} + +export interface QueryDslPinnedDoc { + /** The unique document ID. */ + _id: Id + /** The index that contains the document. */ + _index?: IndexName +} + +export interface QueryDslPinnedQuery extends QueryDslQueryBase { + /** Any choice of query used to rank documents which will be ranked below the "pinned" documents. */ + organic: QueryDslQueryContainer + /** Document IDs listed in the order they are to appear in results. + * Required if `docs` is not specified. */ + ids?: Id[] + /** Documents listed in the order they are to appear in results. + * Required if `ids` is not specified. */ + docs?: QueryDslPinnedDoc[] +} + +export interface QueryDslPrefixQuery extends QueryDslQueryBase { + /** Method used to rewrite the query. */ + rewrite?: MultiTermQueryRewrite + /** Beginning characters of terms you wish to find in the provided field. */ + value: string + /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. + * Default is `false` which means the case sensitivity of matching depends on the underlying field’s mapping. */ + case_insensitive?: boolean +} + +export interface QueryDslQueryBase { + /** Floating point number used to decrease or increase the relevance scores of the query. 
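// Illustrative sketch, not part of the generated type output: a
// QueryDslNestedQuery against a hypothetical `comments` nested field;
// `inner_hits: {}` requests the matching nested objects with default settings.
import type { QueryDslNestedQuery } from '@elastic/elasticsearch/lib/api/types'

const nested: QueryDslNestedQuery = {
  path: 'comments',
  query: { term: { 'comments.author': 'alice' } },
  score_mode: 'avg',
  inner_hits: {}
}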
+ * Boost values are relative to the default value of 1.0. + * A boost value between 0 and 1.0 decreases the relevance score. + * A value greater than 1.0 increases the relevance score. */ + boost?: float + _name?: string +} + +export interface QueryDslQueryContainer { + /** Matches documents matching boolean combinations of other queries. */ + bool?: QueryDslBoolQuery + /** Returns documents matching a `positive` query while reducing the relevance score of documents that also match a `negative` query. */ + boosting?: QueryDslBoostingQuery + common?: Partial> + /** The `combined_fields` query supports searching multiple text fields as if their contents had been indexed into one combined field. */ + combined_fields?: QueryDslCombinedFieldsQuery + /** Wraps a filter query and returns every matching document with a relevance score equal to the `boost` parameter value. */ + constant_score?: QueryDslConstantScoreQuery + /** Returns documents matching one or more wrapped queries, called query clauses or clauses. + * If a returned document matches multiple query clauses, the `dis_max` query assigns the document the highest relevance score from any matching clause, plus a tie breaking increment for any additional matching subqueries. */ + dis_max?: QueryDslDisMaxQuery + /** Boosts the relevance score of documents closer to a provided origin date or point. + * For example, you can use this query to give more weight to documents closer to a certain date or location. */ + distance_feature?: QueryDslDistanceFeatureQuery + /** Returns documents that contain an indexed value for a field. */ + exists?: QueryDslExistsQuery + /** The `function_score` enables you to modify the score of documents that are retrieved by a query. */ + function_score?: QueryDslFunctionScoreQuery | QueryDslFunctionScoreContainer[] + /** Returns documents that contain terms similar to the search term, as measured by a Levenshtein edit distance. */ + fuzzy?: Partial> + /** Matches geo_point and geo_shape values that intersect a bounding box. */ + geo_bounding_box?: QueryDslGeoBoundingBoxQuery + /** Matches `geo_point` and `geo_shape` values within a given distance of a geopoint. */ + geo_distance?: QueryDslGeoDistanceQuery + /** Matches `geo_point` and `geo_shape` values that intersect a grid cell from a GeoGrid aggregation. */ + geo_grid?: Partial> + geo_polygon?: QueryDslGeoPolygonQuery + /** Filters documents indexed using either the `geo_shape` or the `geo_point` type. */ + geo_shape?: QueryDslGeoShapeQuery + /** Returns parent documents whose joined child documents match a provided query. */ + has_child?: QueryDslHasChildQuery + /** Returns child documents whose joined parent document matches a provided query. */ + has_parent?: QueryDslHasParentQuery + /** Returns documents based on their IDs. + * This query uses document IDs stored in the `_id` field. */ + ids?: QueryDslIdsQuery + /** Returns documents based on the order and proximity of matching terms. */ + intervals?: Partial> + /** Finds the k nearest vectors to a query vector, as measured by a similarity + * metric. knn query finds nearest vectors through approximate search on indexed + * dense_vectors. */ + knn?: KnnQuery + /** Returns documents that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ + match?: Partial> + /** Matches all documents, giving them all a `_score` of 1.0. */ + match_all?: QueryDslMatchAllQuery + /** Analyzes its input and constructs a `bool` query from the terms.
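// Illustrative sketch, not part of the generated type output:
// QueryDslQueryContainer is the variant bag the search body accepts; normally
// exactly one top-level key is set, and containers nest arbitrarily. Field
// names and coordinates are hypothetical.
import type { QueryDslQueryContainer } from '@elastic/elasticsearch/lib/api/types'

const container: QueryDslQueryContainer = {
  bool: {
    must: { match_all: {} },
    filter: [
      { exists: { field: 'user.id' } },
      {
        geo_bounding_box: {
          'pin.location': {
            top_left: { lat: 40.73, lon: -74.1 },
            bottom_right: { lat: 40.01, lon: -71.12 }
          }
        }
      }
    ]
  }
}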
+ * Each term except the last is used in a `term` query. + * The last term is used in a prefix query. */ + match_bool_prefix?: Partial> + /** Matches no documents. */ + match_none?: QueryDslMatchNoneQuery + /** Analyzes the text and creates a phrase query out of the analyzed text. */ + match_phrase?: Partial> + /** Returns documents that contain the words of a provided text, in the same order as provided. + * The last term of the provided text is treated as a prefix, matching any words that begin with that term. */ + match_phrase_prefix?: Partial> + /** Returns documents that are "like" a given set of documents. */ + more_like_this?: QueryDslMoreLikeThisQuery + /** Enables you to search for a provided text, number, date or boolean value across multiple fields. + * The provided text is analyzed before matching. */ + multi_match?: QueryDslMultiMatchQuery + /** Wraps another query to search nested fields. + * If an object matches the search, the nested query returns the root parent document. */ + nested?: QueryDslNestedQuery + /** Returns child documents joined to a specific parent document. */ + parent_id?: QueryDslParentIdQuery + /** Matches queries stored in an index. */ + percolate?: QueryDslPercolateQuery + /** Promotes selected documents to rank higher than those matching a given query. */ + pinned?: QueryDslPinnedQuery + /** Returns documents that contain a specific prefix in a provided field. */ + prefix?: Partial> + /** Returns documents based on a provided query string, using a parser with a strict syntax. */ + query_string?: QueryDslQueryStringQuery + /** Returns documents that contain terms within a provided range. */ + range?: Partial> + /** Boosts the relevance score of documents based on the numeric value of a `rank_feature` or `rank_features` field. */ + rank_feature?: QueryDslRankFeatureQuery + /** Returns documents that contain terms matching a regular expression. */ + regexp?: Partial> + rule?: QueryDslRuleQuery + /** Filters documents based on a provided script. + * The script query is typically used in a filter context. */ + script?: QueryDslScriptQuery + /** Uses a script to provide a custom score for returned documents. */ + script_score?: QueryDslScriptScoreQuery + /** A semantic query to semantic_text field types */ + semantic?: QueryDslSemanticQuery + /** Queries documents that contain fields indexed using the `shape` type. */ + shape?: QueryDslShapeQuery + /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ + simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns matches which enclose another span query. */ + span_containing?: QueryDslSpanContainingQuery + /** Wrapper to allow span queries to participate in composite single-field span queries by _lying_ about their search field. */ + span_field_masking?: QueryDslSpanFieldMaskingQuery + /** Matches spans near the beginning of a field. */ + span_first?: QueryDslSpanFirstQuery + /** Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query) as a `span` query, so it can be nested. */ + span_multi?: QueryDslSpanMultiTermQuery + /** Matches spans which are near one another. + * You can specify `slop`, the maximum number of intervening unmatched positions, as well as whether matches are required to be in-order. 
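// Illustrative sketch, not part of the generated type output: end to end, a
// container is what the search API's `query` parameter accepts. The node URL
// and index name are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function run (): Promise<void> {
  const result = await client.search({
    index: 'articles',
    query: {
      simple_query_string: {
        query: '"fried eggs" +(eggplant | potato) -frittata',
        fields: ['title^5', 'body']
      }
    }
  })
  console.log(result.hits.hits)
}

run().catch(console.error)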
*/ + span_near?: QueryDslSpanNearQuery + /** Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens after (controlled by the parameter `post`) another span query. */ + span_not?: QueryDslSpanNotQuery + /** Matches the union of its span clauses. */ + span_or?: QueryDslSpanOrQuery + /** Matches spans containing a term. */ + span_term?: Partial> + /** Returns matches which are enclosed inside another span query. */ + span_within?: QueryDslSpanWithinQuery + /** Using input query vectors or a natural language processing model to convert a query into a list of token-weight pairs, queries against a sparse vector field. */ + sparse_vector?: QueryDslSparseVectorQuery + /** Returns documents that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ + term?: Partial> + /** Returns documents that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ + terms?: QueryDslTermsQuery + /** Returns documents that contain a minimum number of exact terms in a provided field. + * To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. */ + terms_set?: Partial> + /** Uses a natural language processing model to convert the query text into a list of token-weight pairs which are then used in a query against a sparse vector or rank features field. */ + text_expansion?: Partial> + /** Supports returning text_expansion query results by sending in precomputed tokens with the query. */ + weighted_tokens?: Partial> + /** Returns documents that contain terms matching a wildcard pattern. */ + wildcard?: Partial> + /** A query that accepts any other query as base64 encoded string. */ + wrapper?: QueryDslWrapperQuery + type?: QueryDslTypeQuery +} + +export interface QueryDslQueryStringQuery extends QueryDslQueryBase { + /** If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string. */ + allow_leading_wildcard?: boolean + /** Analyzer used to convert text in the query string into tokens. */ + analyzer?: string + /** If `true`, the query attempts to analyze wildcard terms in the query string. */ + analyze_wildcard?: boolean + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ + auto_generate_synonyms_phrase_query?: boolean + /** Default field to search if no field is provided in the query string. + * Supports wildcards (`*`). + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ + default_field?: Field + /** Default boolean logic used to interpret text in the query string if no operators are specified. */ + default_operator?: QueryDslOperator + /** If `true`, enable position increments in queries constructed from a `query_string` search. */ + enable_position_increments?: boolean + escape?: boolean + /** Array of fields to search. Supports wildcards (`*`). */ + fields?: Field[] + /** Maximum edit distance allowed for fuzzy matching. */ + fuzziness?: Fuzziness + /** Maximum number of terms to which the query expands for fuzzy matching. */ + fuzzy_max_expansions?: integer + /** Number of beginning characters left unchanged for fuzzy matching. 
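// Illustrative sketch, not part of the generated type output: a
// QueryDslQueryStringQuery using the strict query syntax; the field is
// hypothetical. A syntax error in `query` makes the whole search fail.
import type { QueryDslQueryStringQuery } from '@elastic/elasticsearch/lib/api/types'

const parsed: QueryDslQueryStringQuery = {
  query: '(new york city) OR (big apple)',
  default_field: 'content',
  default_operator: 'or'
}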
*/ + fuzzy_prefix_length?: integer + /** Method used to rewrite the query. */ + fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ + fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ + lenient?: boolean + /** Maximum number of automaton states required for the query. */ + max_determinized_states?: integer + /** Minimum number of clauses that must match for a document to be returned. */ + minimum_should_match?: MinimumShouldMatch + /** Maximum number of positions allowed between matching tokens for phrases. */ + phrase_slop?: double + /** Query string you wish to parse and use for search. */ + query: string + /** Analyzer used to convert quoted text in the query string into tokens. + * For quoted text, this parameter overrides the analyzer specified in the `analyzer` parameter. */ + quote_analyzer?: string + /** Suffix appended to quoted text in the query string. + * You can use this suffix to use a different analysis method for exact matches. */ + quote_field_suffix?: string + /** Method used to rewrite the query. */ + rewrite?: MultiTermQueryRewrite + /** How to combine the queries generated from the individual search terms in the resulting `dis_max` query. */ + tie_breaker?: double + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert date values in the query string to UTC. */ + time_zone?: TimeZone + /** Determines how the query matches and scores documents. */ + type?: QueryDslTextQueryType +} + +export interface QueryDslRandomScoreFunction { + field?: Field + seed?: long | string +} + +export type QueryDslRangeQuery = QueryDslUntypedRangeQuery | QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermRangeQuery + +export interface QueryDslRangeQueryBase extends QueryDslQueryBase { + /** Indicates how the range query matches values for `range` fields. */ + relation?: QueryDslRangeRelation + /** Greater than. */ + gt?: T + /** Greater than or equal to. */ + gte?: T + /** Less than. */ + lt?: T + /** Less than or equal to. */ + lte?: T +} + +export type QueryDslRangeRelation = 'within' | 'contains' | 'intersects' + +export interface QueryDslRankFeatureFunction { +} + +export interface QueryDslRankFeatureFunctionLinear { +} + +export interface QueryDslRankFeatureFunctionLogarithm { + /** Configurable scaling factor. */ + scaling_factor: float +} + +export interface QueryDslRankFeatureFunctionSaturation { + /** Configurable pivot value so that the result will be less than 0.5. */ + pivot?: float +} + +export interface QueryDslRankFeatureFunctionSigmoid { + /** Configurable pivot value so that the result will be less than 0.5. */ + pivot: float + /** Configurable Exponent. */ + exponent: float +} + +export interface QueryDslRankFeatureQuery extends QueryDslQueryBase { + /** `rank_feature` or `rank_features` field used to boost relevance scores. */ + field: Field + /** Saturation function used to boost relevance scores based on the value of the rank feature `field`. */ + saturation?: QueryDslRankFeatureFunctionSaturation + /** Logarithmic function used to boost relevance scores based on the value of the rank feature `field`. */ + log?: QueryDslRankFeatureFunctionLogarithm + /** Linear function used to boost relevance scores based on the value of the rank feature `field`. 
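// Illustrative sketch, not part of the generated type output: a numeric
// variant of the shared QueryDslRangeQueryBase shape; the bounds are
// hypothetical. `relation` only matters when querying `range` fields.
import type { QueryDslNumberRangeQuery } from '@elastic/elasticsearch/lib/api/types'

const priceBand: QueryDslNumberRangeQuery = {
  gte: 10,
  lte: 100
}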
*/ + linear?: QueryDslRankFeatureFunctionLinear + /** Sigmoid function used to boost relevance scores based on the value of the rank feature `field`. */ + sigmoid?: QueryDslRankFeatureFunctionSigmoid +} + +export interface QueryDslRegexpQuery extends QueryDslQueryBase { + /** Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`. + * When `false`, case sensitivity of matching depends on the underlying field’s mapping. */ + case_insensitive?: boolean + /** Enables optional operators for the regular expression. */ + flags?: string + /** Maximum number of automaton states required for the query. */ + max_determinized_states?: integer + /** Method used to rewrite the query. */ + rewrite?: MultiTermQueryRewrite + /** Regular expression for terms you wish to find in the provided field. */ + value: string +} + +export interface QueryDslRuleQuery extends QueryDslQueryBase { + organic: QueryDslQueryContainer + ruleset_ids?: Id | Id[] + ruleset_id?: string + match_criteria: any +} + +export interface QueryDslScriptQuery extends QueryDslQueryBase { + /** Contains a script to run as a query. + * This script must return a boolean value, `true` or `false`. */ + script: Script | ScriptSource +} + +export interface QueryDslScriptScoreFunction { + /** A script that computes a score. */ + script: Script | ScriptSource +} + +export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { + /** Documents with a score lower than this floating point number are excluded from the search results. */ + min_score?: float + /** Query used to return documents. */ + query: QueryDslQueryContainer + /** Script used to compute the score of documents returned by the query. + * Important: final relevance scores from the `script_score` query cannot be negative. */ + script: Script | ScriptSource +} + +export interface QueryDslSemanticQuery extends QueryDslQueryBase { + /** The field to query, which must be a semantic_text field type */ + field: string + /** The query text */ + query: string +} + +export interface QueryDslShapeFieldQuery { + /** Queries using a pre-indexed shape. */ + indexed_shape?: QueryDslFieldLookup + /** Spatial relation between the query shape and the document shape. */ + relation?: GeoShapeRelation + /** Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) format. */ + shape?: GeoShape +} + +export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { + /** When set to `true` the query ignores an unmapped field and will not match any documents. */ + ignore_unmapped?: boolean +} +export type QueryDslShapeQuery = QueryDslShapeQueryKeys +& { [property: string]: QueryDslShapeFieldQuery | boolean | float | string } + +export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PREFIX' | 'PHRASE' | 'PRECEDENCE' | 'ESCAPE' | 'WHITESPACE' | 'FUZZY' | 'NEAR' | 'SLOP' | 'ALL' + +export type QueryDslSimpleQueryStringFlags = SpecUtilsPipeSeparatedFlags + +export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { + /** Analyzer used to convert text in the query string into tokens. */ + analyzer?: string + /** If `true`, the query attempts to analyze wildcard terms in the query string. */ + analyze_wildcard?: boolean + /** If `true`, the parser creates a match_phrase query for each multi-position token. */ + auto_generate_synonyms_phrase_query?: boolean + /** Default boolean logic used to interpret text in the query string if no operators are specified. 
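// Illustrative sketch, not part of the generated type output: a
// QueryDslScriptScoreQuery re-scoring matches with a Painless expression over
// a hypothetical numeric field; documents scoring below `min_score` are dropped.
import type { QueryDslScriptScoreQuery } from '@elastic/elasticsearch/lib/api/types'

const rescored: QueryDslScriptScoreQuery = {
  query: { match: { message: { query: 'elasticsearch' } } },
  script: { source: "doc['my-int'].value / 10" },
  min_score: 0.5
}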
*/ + default_operator?: QueryDslOperator + /** Array of fields you wish to search. + * Accepts wildcard expressions. + * You also can boost relevance scores for matches to particular fields using a caret (`^`) notation. + * Defaults to the `index.query.default_field index` setting, which has a default value of `*`. */ + fields?: Field[] + /** List of enabled operators for the simple query string syntax. */ + flags?: QueryDslSimpleQueryStringFlags + /** Maximum number of terms to which the query expands for fuzzy matching. */ + fuzzy_max_expansions?: integer + /** Number of beginning characters left unchanged for fuzzy matching. */ + fuzzy_prefix_length?: integer + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ + fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ + lenient?: boolean + /** Minimum number of clauses that must match for a document to be returned. */ + minimum_should_match?: MinimumShouldMatch + /** Query string in the simple query string syntax you wish to parse and use for search. */ + query: string + /** Suffix appended to quoted text in the query string. */ + quote_field_suffix?: string +} + +export interface QueryDslSpanContainingQuery extends QueryDslQueryBase { + /** Can be any span query. + * Matching spans from `big` that contain matches from `little` are returned. */ + big: QueryDslSpanQuery + /** Can be any span query. + * Matching spans from `big` that contain matches from `little` are returned. */ + little: QueryDslSpanQuery +} + +export interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase { + field: Field + query: QueryDslSpanQuery +} + +export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { + /** Controls the maximum end position permitted in a match. */ + end: integer + /** Can be any other span type query. */ + match: QueryDslSpanQuery +} + +export type QueryDslSpanGapQuery = Partial> + +export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { + /** Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). */ + match: QueryDslQueryContainer +} + +export interface QueryDslSpanNearQuery extends QueryDslQueryBase { + /** Array of one or more other span type queries. */ + clauses: QueryDslSpanQuery[] + /** Controls whether matches are required to be in-order. */ + in_order?: boolean + /** Controls the maximum number of intervening unmatched positions permitted. */ + slop?: integer +} + +export interface QueryDslSpanNotQuery extends QueryDslQueryBase { + /** The number of tokens from within the include span that can’t have overlap with the exclude span. + * Equivalent to setting both `pre` and `post`. */ + dist?: integer + /** Span query whose matches must not overlap those returned. */ + exclude: QueryDslSpanQuery + /** Span query whose matches are filtered. */ + include: QueryDslSpanQuery + /** The number of tokens after the include span that can’t have overlap with the exclude span. */ + post?: integer + /** The number of tokens before the include span that can’t have overlap with the exclude span. */ + pre?: integer +} + +export interface QueryDslSpanOrQuery extends QueryDslQueryBase { + /** Array of one or more other span type queries. */ + clauses: QueryDslSpanQuery[] +} + +export interface QueryDslSpanQuery { + /** Accepts a list of span queries, but only returns those spans which also match a second span query. 
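// Illustrative sketch, not part of the generated type output: a
// QueryDslSpanNearQuery built from span_term clauses (string shorthand,
// assuming the Field-keyed record form shown above) on a hypothetical field;
// the terms must occur within five positions of each other, in order.
import type { QueryDslSpanNearQuery } from '@elastic/elasticsearch/lib/api/types'

const nearSpans: QueryDslSpanNearQuery = {
  clauses: [
    { span_term: { 'body.text': 'quick' } },
    { span_term: { 'body.text': 'fox' } }
  ],
  slop: 5,
  in_order: true
}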
export interface QueryDslSpanQuery { + /** Accepts a list of span queries, but only returns those spans which also match a second span query. */ + span_containing?: QueryDslSpanContainingQuery + /** Allows queries like `span_near` or `span_or` across different fields. */ + span_field_masking?: QueryDslSpanFieldMaskingQuery + /** Accepts another span query whose matches must appear within the first N positions of the field. */ + span_first?: QueryDslSpanFirstQuery + span_gap?: QueryDslSpanGapQuery + /** Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query. */ + span_multi?: QueryDslSpanMultiTermQuery + /** Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. */ + span_near?: QueryDslSpanNearQuery + /** Wraps another span query, and excludes any documents which match that query. */ + span_not?: QueryDslSpanNotQuery + /** Combines multiple span queries and returns documents which match any of the specified queries. */ + span_or?: QueryDslSpanOrQuery + /** The equivalent of the `term` query but for use with other span queries. */ + span_term?: Partial<Record<Field, QueryDslSpanTermQuery | FieldValue>> + /** The result from a single span query is returned as long as its span falls within the spans returned by a list of other span queries. */ + span_within?: QueryDslSpanWithinQuery +} + +export interface QueryDslSpanTermQuery extends QueryDslQueryBase { + value: FieldValue + /** @alias value */ + term: FieldValue +} + +export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { + /** Can be any span query. + * Matching spans from `little` that are enclosed within `big` are returned. */ + big: QueryDslSpanQuery + /** Can be any span query. + * Matching spans from `little` that are enclosed within `big` are returned. */ + little: QueryDslSpanQuery +} + +export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { + /** The name of the field that contains the token-weight pairs to be searched against. + * This field must be a mapped sparse_vector field. */ + field: Field + /** Dictionary of precomputed sparse vectors and their associated weights. + * Only one of inference_id or query_vector may be supplied in a request. */ + query_vector?: Record<string, float> + /** The inference ID to use to convert the query text into token-weight pairs. + * It must be the same inference ID that was used to create the tokens from the input text. + * Only one of inference_id and query_vector is allowed. + * If inference_id is specified, query must also be specified. + * Only one of inference_id or query_vector may be supplied in a request. */ + inference_id?: Id + /** The query text you want to use for search. + * If inference_id is specified, query must also be specified. */ + query?: string + /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. + * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. + * Default: false */ + prune?: boolean + /** Optional pruning configuration. + * If enabled, this will omit non-significant tokens from the query in order to improve query performance. + * This is only used if prune is set to true. + * If prune is set to true but pruning_config is not specified, default values will be used. */ + pruning_config?: TokenPruningConfig +}
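`sparse_vector` accepts either precomputed token weights or an inference endpoint plus query text, never both. A sketch of the two shapes (field and endpoint names are placeholders):

import { estypes } from '@elastic/elasticsearch'

// Variant 1: precomputed token-weight pairs.
const withVector: estypes.QueryDslQueryContainer = {
  sparse_vector: {
    field: 'ml.tokens',
    query_vector: { token_a: 0.52, token_b: 1.17 }
  }
}

// Variant 2: let an inference endpoint tokenize the query text,
// with pruning enabled under the default pruning settings.
const withInference: estypes.QueryDslQueryContainer = {
  sparse_vector: {
    field: 'ml.tokens',
    inference_id: 'my-elser-endpoint', // hypothetical endpoint name
    query: 'what is a sparse vector?',
    prune: true
  }
}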
export interface QueryDslTermQuery extends QueryDslQueryBase { + /** Term you wish to find in the provided field. */ + value: FieldValue + /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. + * When `false`, the case sensitivity of matching depends on the underlying field’s mapping. */ + case_insensitive?: boolean +} + +export interface QueryDslTermRangeQuery extends QueryDslRangeQueryBase { +} + +export interface QueryDslTermsLookup { + index: IndexName + id: Id + path: Field + routing?: Routing +} + +export interface QueryDslTermsQueryKeys extends QueryDslQueryBase { +} +export type QueryDslTermsQuery = QueryDslTermsQueryKeys +& { [property: string]: QueryDslTermsQueryField | float | string } + +export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup + +export interface QueryDslTermsSetQuery extends QueryDslQueryBase { + /** Specification describing number of matching terms required to return a document. */ + minimum_should_match?: MinimumShouldMatch + /** Numeric field containing the number of matching terms required to return a document. */ + minimum_should_match_field?: Field + /** Custom script containing the number of matching terms required to return a document. */ + minimum_should_match_script?: Script | ScriptSource + /** Array of terms you wish to find in the provided field. */ + terms: FieldValue[] +} + +export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { + /** The text expansion NLP model to use */ + model_id: string + /** The query text */ + model_text: string + /** Token pruning configurations + * @experimental */ + pruning_config?: TokenPruningConfig +} + +export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' + +export interface QueryDslTypeQuery extends QueryDslQueryBase { + value: string +} + +export interface QueryDslUntypedDecayFunctionKeys extends QueryDslDecayFunctionBase { +} +export type QueryDslUntypedDecayFunction = QueryDslUntypedDecayFunctionKeys +& { [property: string]: QueryDslDecayPlacement | QueryDslMultiValueMode } + +export interface QueryDslUntypedDistanceFeatureQuery extends QueryDslDistanceFeatureQueryBase { +} + +export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase { + /** Date format used to convert `date` values in the query. */ + format?: DateFormat + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ + time_zone?: TimeZone +} + +export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { + /** The tokens representing this query */ + tokens: Record<string, float> | Record<string, float>[] + /** Token pruning configurations */ + pruning_config?: TokenPruningConfig +} + +export interface QueryDslWildcardQuery extends QueryDslQueryBase { + /** Allows case insensitive matching of the pattern with the indexed field values when set to true. Default is false which means the case sensitivity of matching depends on the underlying field’s mapping. */ + case_insensitive?: boolean + /** Method used to rewrite the query. */ + rewrite?: MultiTermQueryRewrite + /** Wildcard pattern for terms you wish to find in the provided field. Required when `wildcard` is not set. */ + value?: string + /** Wildcard pattern for terms you wish to find in the provided field. Required when `value` is not set. */ + wildcard?: string +}
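Because `value` and `wildcard` are alternates on the wildcard query, exactly one of them should be set per clause. A small sketch (field name and pattern are placeholders):

import { estypes } from '@elastic/elasticsearch'

// Case-insensitive wildcard match on a keyword field.
const query: estypes.QueryDslQueryContainer = {
  wildcard: {
    'user.name': {
      value: 'ki*y',
      case_insensitive: true
    }
  }
}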
export interface QueryDslWrapperQuery extends QueryDslQueryBase { + /** A base64 encoded query. + * The binary data format can be any of JSON, YAML, CBOR or SMILE encodings */ + query: string +} + +export type QueryDslZeroTermsQuery = 'all' | 'none' + +export interface AsyncSearchAsyncSearch<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> { + /** Partial aggregations results, coming from the shards that have already completed running the query. */ + aggregations?: TAggregations + _clusters?: ClusterStatistics + fields?: Record<string, any> + hits: SearchHitsMetadata<TDocument> + max_score?: double + /** Indicates how many reductions of the results have been performed. + * If this number increases compared to the last retrieved results for a get async search request, you can expect additional results included in the search response. */ + num_reduce_phases?: long + profile?: SearchProfile + pit_id?: Id + _scroll_id?: ScrollId + /** Indicates how many shards have run the query. + * Note that in order for shard results to be included in the search response, they need to be reduced first. */ + _shards: ShardStatistics + suggest?: Record<SuggestionName, SearchSuggest<TDocument>[]> + terminated_early?: boolean + timed_out: boolean + took: long +} + +export interface AsyncSearchAsyncSearchDocumentResponseBase<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> extends AsyncSearchAsyncSearchResponseBase { + response: AsyncSearchAsyncSearch<TDocument, TAggregations> +} + +export interface AsyncSearchAsyncSearchResponseBase { + id?: Id + /** When the query is no longer running, this property indicates whether the search failed or was successfully completed on all shards. + * While the query is running, `is_partial` is always set to `true`. */ + is_partial: boolean + /** Indicates whether the search is still running or has completed. + * + * > info + * > If the search failed after some shards returned their results or the node that is coordinating the async search dies, results may be partial even though `is_running` is `false`. */ + is_running: boolean + /** Indicates when the async search will expire. */ + expiration_time?: DateTime + expiration_time_in_millis: EpochTime<UnitMillis> + start_time?: DateTime + start_time_in_millis: EpochTime<UnitMillis> + /** Indicates when the async search completed. + * It is present only when the search has completed. */ + completion_time?: DateTime + completion_time_in_millis?: EpochTime<UnitMillis> +} + +export interface AsyncSearchDeleteRequest extends RequestBase { + /** A unique identifier for the async search. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export type AsyncSearchDeleteResponse = AcknowledgedResponseBase
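Taken together, these types describe a submit/poll/clean-up lifecycle. A minimal sketch against the JS client (node URL, index name, and query are illustrative only, and error handling is omitted):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

async function run (): Promise<void> {
  // Submit; if the search finishes within the timeout and results are
  // not kept, the response may carry no id at all.
  const submitted = await client.asyncSearch.submit({
    index: 'my-index', // hypothetical index
    wait_for_completion_timeout: '2s',
    keep_on_completion: true,
    query: { match_all: {} }
  })

  if (submitted.id != null) {
    // Fetch the stored results, then clean up.
    const result = await client.asyncSearch.get({ id: submitted.id })
    if (!result.is_running) console.log(result.response.hits.hits.length)
    await client.asyncSearch.delete({ id: submitted.id })
  }
}

run().catch(console.error)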
export interface AsyncSearchGetRequest extends RequestBase { + /** A unique identifier for the async search. */ + id: Id + /** The length of time that the async search should be available in the cluster. + * When not specified, the `keep_alive` set with the corresponding submit async request will be used. + * Otherwise, it is possible to override the value and extend the validity of the request. + * When this period expires, the search, if still running, is cancelled. + * If the search is completed, its saved results are deleted. */ + keep_alive?: Duration + /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ + typed_keys?: boolean + /** Specifies to wait for the search to be completed up until the provided timeout. + * Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. + * By default no timeout is set meaning that the currently available results will be returned without any additional wait. */ + wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, typed_keys?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, keep_alive?: never, typed_keys?: never, wait_for_completion_timeout?: never } +} + +export type AsyncSearchGetResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations> + +export interface AsyncSearchStatusRequest extends RequestBase { + /** A unique identifier for the async search. */ + id: Id + /** The length of time that the async search needs to be available. + * Ongoing async searches and any saved search results are deleted after this period. */ + keep_alive?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, keep_alive?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, keep_alive?: never } +} + +export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase + +export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase { + /** The number of shards that have run the query so far. */ + _shards: ShardStatistics + /** Metadata about clusters involved in the cross-cluster search. + * It is not shown for local-only searches. */ + _clusters?: ClusterStatistics + /** If the async search completed, this field shows the status code of the search. + * For example, `200` indicates that the async search was successfully completed. + * `503` indicates that the async search was completed with an error. */ + completion_status?: integer +} + +export interface AsyncSearchSubmitRequest extends RequestBase { + /** A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices */ + index?: Indices + /** Blocks and waits until the search is completed up to a certain timeout. + * When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. */ + wait_for_completion_timeout?: Duration + /** Specifies how long the async search needs to be available. + * Ongoing async searches and any saved search results are deleted after this period. */ + keep_alive?: Duration + /** If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. */ + keep_on_completion?: boolean + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices.
(This includes `_all` string or when no indices have been specified) */ + allow_no_indices?: boolean + /** Indicate if an error should be returned if there is a partial search failure or timeout */ + allow_partial_search_results?: boolean + /** The analyzer to use for the query string */ + analyzer?: string + /** Specify whether wildcard and prefix queries should be analyzed (default: false) */ + analyze_wildcard?: boolean + /** Affects how often partial results become available, which happens whenever shard results are reduced. + * A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). */ + batched_reduce_size?: long + /** The default value is the only supported value. */ + ccs_minimize_roundtrips?: boolean + /** The default operator for query string query (AND or OR) */ + default_operator?: QueryDslOperator + /** The field to use as default where no field prefix is given in the query string */ + df?: string + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ + expand_wildcards?: ExpandWildcards + /** Whether specified concrete, expanded or aliased indices should be ignored when throttled */ + ignore_throttled?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ + ignore_unavailable?: boolean + /** Specify whether format-based query failures (such as providing text to a numeric field) should be ignored */ + lenient?: boolean + /** The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests */ + max_concurrent_shard_requests?: integer + /** Specify the node or shard the operation should be performed on (default: random) */ + preference?: string + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting + /** Specify if request cache should be used for this request or not, defaults to true */ + request_cache?: boolean + /** A comma-separated list of specific routing values */ + routing?: Routing + /** Search operation type */ + search_type?: SearchType + /** Specifies which field to use for suggestions. */ + suggest_field?: Field + /** Specify suggest mode */ + suggest_mode?: SuggestMode + /** How many suggestions to return in response */ + suggest_size?: long + /** The source text for which the suggestions should be returned. 
*/ + suggest_text?: string + /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ + typed_keys?: boolean + /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ + rest_total_hits_as_int?: boolean + /** A list of fields to exclude from the returned _source field */ + _source_excludes?: Fields + /** A list of fields to extract and return from the _source field */ + _source_includes?: Fields + /** Query in the Lucene query string syntax */ + q?: string + aggregations?: Record<string, AggregationsAggregationContainer> + /** @alias aggregations */ + aggs?: Record<string, AggregationsAggregationContainer> + collapse?: SearchFieldCollapse + /** If true, returns detailed information about score computation as part of a hit. */ + explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ + ext?: Record<string, any> + /** Starting document offset. By default, you cannot page through more than 10,000 + * hits using the from and size parameters. To page through more hits, use the + * search_after parameter. */ + from?: integer + highlight?: SearchHighlight + /** Number of hits matching the query to count accurately. If true, the exact + * number of hits is returned at the cost of some performance. If false, the + * response does not include the total number of hits matching the query. + * Defaults to 10,000 hits. */ + track_total_hits?: SearchTrackHits + /** Boosts the _score of documents from specified indices. */ + indices_boost?: Partial<Record<IndexName, double>>[] + /** Array of wildcard (*) patterns. The request returns doc values for field + * names matching these patterns in the hits.fields property of the response. */ + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** Defines the approximate kNN search to run. */ + knn?: KnnSearch | KnnSearch[] + /** Minimum _score for matching documents. Documents with a lower _score are + * not included in search results and results collected by aggregations. */ + min_score?: double + post_filter?: QueryDslQueryContainer + profile?: boolean + /** Defines the search definition using the Query DSL. */ + query?: QueryDslQueryContainer + rescore?: SearchRescore | SearchRescore[] + /** Retrieve a script evaluation (based on different fields) for each hit. */ + script_fields?: Record<string, ScriptField> + search_after?: SortResults + /** The number of hits to return. By default, you cannot page through more + * than 10,000 hits using the from and size parameters. To page through more + * hits, use the search_after parameter. */ + size?: integer + slice?: SlicedScroll + sort?: Sort + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the hits._source property of the search response. */ + _source?: SearchSourceConfig + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ + fields?: (QueryDslFieldAndFormat | Field)[] + suggest?: SearchSuggester + /** Maximum number of documents to collect for each shard. If a query reaches this + * limit, Elasticsearch terminates the query early. Elasticsearch collects documents + * before sorting. Defaults to 0, which does not terminate query execution early. */ + terminate_after?: long + /** Specifies the period of time to wait for a response from each shard. If no response + * is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout.
*/ + timeout?: string + /** If true, calculate and return document scores, even if the scores are not used for sorting. */ + track_scores?: boolean + /** If true, returns document version as part of a hit. */ + version?: boolean + /** If true, returns sequence number and primary term of the last modification + * of each hit. See Optimistic concurrency control. */ + seq_no_primary_term?: boolean + /** List of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the _source + * parameter defaults to false. You can pass _source: true to return both source fields + * and stored fields in the search response. */ + stored_fields?: Fields + /** Limits the search to a point in time (PIT). If you provide a PIT, you + * cannot specify an `<index>` in the request path. */ + pit?: SearchPointInTimeReference + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ + runtime_mappings?: MappingRuntimeFields + /** Stats groups to associate with the search. Each group maintains a statistics + * aggregation for its associated searches. You can retrieve these stats using + * the indices stats API. */ + stats?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, project_routing?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, project_routing?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } +} + +export type AsyncSearchSubmitResponse<TDocument = unknown, TAggregations = Record<AggregateName, AggregationsAggregate>> = AsyncSearchAsyncSearchDocumentResponseBase<TDocument, TAggregations> + +export interface AutoscalingAutoscalingPolicy { + roles: string[] + /** Decider settings. */ + deciders: Record<string, any> +}
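A policy pairs `roles` with decider settings keyed by decider name, and the put/get/delete requests that follow operate on it by `name`. A hedged sketch against the JS client (node URL and policy name are placeholders; the `fixed` decider with empty settings is purely illustrative):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

async function managePolicy (): Promise<void> {
  // deciders is an open Record<string, any> keyed by decider name.
  await client.autoscaling.putAutoscalingPolicy({
    name: 'my-autoscaling-policy', // hypothetical policy name
    policy: {
      roles: ['data_hot'],
      deciders: { fixed: {} }
    }
  })

  const policy = await client.autoscaling.getAutoscalingPolicy({ name: 'my-autoscaling-policy' })
  console.log(policy.roles)
}

managePolicy().catch(console.error)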
export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { + /** the name of the autoscaling policy */ + name: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } +} + +export type AutoscalingDeleteAutoscalingPolicyResponse = AcknowledgedResponseBase + +export interface AutoscalingGetAutoscalingCapacityAutoscalingCapacity { + node: AutoscalingGetAutoscalingCapacityAutoscalingResources + total: AutoscalingGetAutoscalingCapacityAutoscalingResources +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingDecider { + required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity + reason_summary?: string + reason_details?: any +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingDeciders { + required_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity + current_capacity: AutoscalingGetAutoscalingCapacityAutoscalingCapacity + current_nodes: AutoscalingGetAutoscalingCapacityAutoscalingNode[] + deciders: Record<string, AutoscalingGetAutoscalingCapacityAutoscalingDecider> +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingNode { + name: NodeName +} + +export interface AutoscalingGetAutoscalingCapacityAutoscalingResources { + storage: integer + memory: integer +} + +export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase { + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface AutoscalingGetAutoscalingCapacityResponse { + policies: Record<string, AutoscalingGetAutoscalingCapacityAutoscalingDeciders> +} + +export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { + /** the name of the autoscaling policy */ + name: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy + +export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { + /** the name of the autoscaling policy */ + name: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + policy?: AutoscalingAutoscalingPolicy + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never } +} + +export type AutoscalingPutAutoscalingPolicyResponse = AcknowledgedResponseBase + +export type CatCatAliasesColumn = 'alias' | 'a' | 'index' | 'i' | 'idx' | 'filter' | 'f' | 'fi' | 'routing.index' | 'ri' | 'routingIndex' | 'routing.search' | 'rs' | 'routingSearch' | 'is_write_index' | 'w' | 'isWriteIndex' | string + +export type CatCatAliasesColumns = CatCatAliasesColumn | CatCatAliasesColumn[] + +export type CatCatAllocationColumn = 'shards' | 's' | 'shards.undesired' | 'write_load.forecast' | 'wlf' | 'writeLoadForecast' | 'disk.indices.forecast' | 'dif' | 'diskIndicesForecast' | 'disk.indices' | 'di' | 'diskIndices' | 'disk.used' | 'du' | 'diskUsed' | 'disk.avail' | 'da' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.percent' | 'dp' | 'diskPercent' | 'host' | 'h' | 'ip' | 'node' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | string + +export type CatCatAllocationColumns = CatCatAllocationColumn | CatCatAllocationColumn[] + +export type CatCatAnomalyDetectorColumn = 'assignment_explanation' | 'ae' | 'buckets.count' | 'bc' | 'bucketsCount' | 'buckets.time.exp_avg' | 'btea' | 'bucketsTimeExpAvg' | 'buckets.time.exp_avg_hour' | 'bteah' | 'bucketsTimeExpAvgHour' | 'buckets.time.max' | 'btmax' | 'bucketsTimeMax' | 'buckets.time.min' | 'btmin' | 'bucketsTimeMin' | 'buckets.time.total' | 'btt' | 'bucketsTimeTotal' | 'data.buckets' | 'db' | 'dataBuckets' | 'data.earliest_record' | 'der' | 'dataEarliestRecord' | 'data.empty_buckets' | 'deb' | 'dataEmptyBuckets' | 'data.input_bytes' | 'dib' | 'dataInputBytes' | 'data.input_fields' | 'dif' | 'dataInputFields' | 'data.input_records' | 'dir' | 'dataInputRecords' | 'data.invalid_dates' | 'did' | 'dataInvalidDates' | 'data.last' | 'dl' | 'dataLast' | 'data.last_empty_bucket' | 'dleb' | 'dataLastEmptyBucket' | 'data.last_sparse_bucket' | 'dlsb' | 'dataLastSparseBucket' | 'data.latest_record' | 'dlr' | 'dataLatestRecord' | 'data.missing_fields' | 'dmf' | 'dataMissingFields' | 'data.out_of_order_timestamps' | 'doot' | 'dataOutOfOrderTimestamps' | 'data.processed_fields' | 'dpf' | 'dataProcessedFields' | 'data.processed_records' | 'dpr' | 'dataProcessedRecords' | 'data.sparse_buckets' | 'dsb' | 'dataSparseBuckets' | 'forecasts.memory.avg' | 'fmavg' | 'forecastsMemoryAvg' | 'forecasts.memory.max' | 'fmmax' | 'forecastsMemoryMax' | 'forecasts.memory.min' | 'fmmin' | 'forecastsMemoryMin' | 'forecasts.memory.total' | 'fmt' | 'forecastsMemoryTotal' | 'forecasts.records.avg' | 'fravg' | 'forecastsRecordsAvg' | 'forecasts.records.max' | 'frmax' | 'forecastsRecordsMax' | 'forecasts.records.min' | 'frmin' | 'forecastsRecordsMin' | 'forecasts.records.total' | 'frt' | 'forecastsRecordsTotal' | 'forecasts.time.avg' | 'ftavg' | 'forecastsTimeAvg' | 'forecasts.time.max' | 'ftmax' | 'forecastsTimeMax' | 'forecasts.time.min' | 'ftmin' | 'forecastsTimeMin' | 'forecasts.time.total' | 'ftt' | 'forecastsTimeTotal' | 'forecasts.total' | 'ft' | 'forecastsTotal' | 'id' | 'model.bucket_allocation_failures' | 'mbaf' | 'modelBucketAllocationFailures' | 'model.by_fields' | 'mbf' | 'modelByFields' | 'model.bytes' | 'mb' | 'modelBytes' | 'model.bytes_exceeded' | 'mbe' | 'modelBytesExceeded' | 'model.categorization_status' | 'mcs' | 'modelCategorizationStatus' | 'model.categorized_doc_count' | 'mcdc' | 'modelCategorizedDocCount' | 'model.dead_category_count' | 'mdcc' | 'modelDeadCategoryCount' | 'model.failed_category_count' | 'mdcc' | 
'modelFailedCategoryCount' | 'model.frequent_category_count' | 'mfcc' | 'modelFrequentCategoryCount' | 'model.log_time' | 'mlt' | 'modelLogTime' | 'model.memory_limit' | 'mml' | 'modelMemoryLimit' | 'model.memory_status' | 'mms' | 'modelMemoryStatus' | 'model.over_fields' | 'mof' | 'modelOverFields' | 'model.partition_fields' | 'mpf' | 'modelPartitionFields' | 'model.rare_category_count' | 'mrcc' | 'modelRareCategoryCount' | 'model.timestamp' | 'mt' | 'modelTimestamp' | 'model.total_category_count' | 'mtcc' | 'modelTotalCategoryCount' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'opened_time' | 'ot' | 'state' | 's' + +export type CatCatAnomalyDetectorColumns = CatCatAnomalyDetectorColumn | CatCatAnomalyDetectorColumn[] + +export type CatCatComponentColumn = 'name' | 'n' | 'version' | 'v' | 'alias_count' | 'a' | 'mapping_count' | 'm' | 'settings_count' | 's' | 'metadata_count' | 'me' | 'included_in' | 'i' | string + +export type CatCatComponentColumns = CatCatComponentColumn | CatCatComponentColumn[] + +export type CatCatCountColumn = 'epoch' | 't' | 'time' | 'timestamp' | 'ts' | 'hms' | 'hhmmss' | 'count' | 'dc' | 'docs.count' | 'docsCount' | string + +export type CatCatCountColumns = CatCatCountColumn | CatCatCountColumn[] + +export type CatCatDatafeedColumn = 'ae' | 'assignment_explanation' | 'bc' | 'buckets.count' | 'bucketsCount' | 'id' | 'na' | 'node.address' | 'nodeAddress' | 'ne' | 'node.ephemeral_id' | 'nodeEphemeralId' | 'ni' | 'node.id' | 'nodeId' | 'nn' | 'node.name' | 'nodeName' | 'sba' | 'search.bucket_avg' | 'searchBucketAvg' | 'sc' | 'search.count' | 'searchCount' | 'seah' | 'search.exp_avg_hour' | 'searchExpAvgHour' | 'st' | 'search.time' | 'searchTime' | 's' | 'state' + +export type CatCatDatafeedColumns = CatCatDatafeedColumn | CatCatDatafeedColumn[] + +export type CatCatDfaColumn = 'assignment_explanation' | 'ae' | 'create_time' | 'ct' | 'createTime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'failure_reason' | 'fr' | 'failureReason' | 'id' | 'model_memory_limit' | 'mml' | 'modelMemoryLimit' | 'node.address' | 'na' | 'nodeAddress' | 'node.ephemeral_id' | 'ne' | 'nodeEphemeralId' | 'node.id' | 'ni' | 'nodeId' | 'node.name' | 'nn' | 'nodeName' | 'progress' | 'p' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'type' | 't' | 'version' | 'v' + +export type CatCatDfaColumns = CatCatDfaColumn | CatCatDfaColumn[] + +export type CatCatFieldDataColumn = 'id' | 'host' | 'h' | 'ip' | 'node' | 'n' | 'field' | 'f' | 'size' | 's' | string + +export type CatCatFieldDataColumns = CatCatFieldDataColumn | CatCatFieldDataColumn[] + +export type CatCatHealthColumn = 'epoch' | 't' | 'time' | 'timestamp' | 'ts' | 'hms' | 'hhmmss' | 'cluster' | 'cl' | 'status' | 'st' | 'node.total' | 'nt' | 'nodeTotal' | 'node.data' | 'nd' | 'nodeData' | 'shards' | 't' | 'sh' | 'shards.total' | 'shardsTotal' | 'pri' | 'p' | 'shards.primary' | 'shardsPrimary' | 'relo' | 'r' | 'shards.relocating' | 'shardsRelocating' | 'init' | 'i' | 'shards.initializing' | 'shardsInitializing' | 'unassign' | 'u' | 'shards.unassigned' | 'shardsUnassigned' | 'unassign.pri' | 'up' | 'shards.unassigned.primary' | 'shardsUnassignedPrimary' | 'pending_tasks' | 'pt' | 'pendingTasks' | 'max_task_wait_time' | 'mtwt' | 'maxTaskWaitTime' | 'active_shards_percent' | 'asp' | 'activeShardsPercent' | string + +export type CatCatHealthColumns = CatCatHealthColumn | CatCatHealthColumn[] + +export type 
CatCatIndicesColumn = 'health' | 'h' | 'status' | 's' | 'index' | 'i' | 'idx' | 'uuid' | 'id' | 'uuid' | 'pri' | 'p' | 'shards.primary' | 'shardsPrimary' | 'rep' | 'r' | 'shards.replica' | 'shardsReplica' | 'docs.count' | 'dc' | 'docsCount' | 'docs.deleted' | 'dd' | 'docsDeleted' | 'creation.date' | 'cd' | 'creation.date.string' | 'cds' | 'store.size' | 'ss' | 'storeSize' | 'pri.store.size' | 'dataset.size' | 'completion.size' | 'cs' | 'completionSize' | 'pri.completion.size' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'pri.fielddata.memory_size' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'pri.fielddata.evictions' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'pri.query_cache.memory_size' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'pri.query_cache.evictions' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'pri.request_cache.memory_size' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'pri.request_cache.evictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'pri.request_cache.hit_count' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'pri.request_cache.miss_count' | 'flush.total' | 'ft' | 'flushTotal' | 'pri.flush.total' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'pri.flush.total_time' | 'get.current' | 'gc' | 'getCurrent' | 'pri.get.current' | 'get.time' | 'gti' | 'getTime' | 'pri.get.time' | 'get.total' | 'gto' | 'getTotal' | 'pri.get.total' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'pri.get.exists_time' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'pri.get.exists_total' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'pri.get.missing_time' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'pri.get.missing_total' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'pri.indexing.delete_current' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'pri.indexing.delete_time' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'pri.indexing.delete_total' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'pri.indexing.index_current' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'pri.indexing.index_time' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'pri.indexing.index_total' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'pri.indexing.index_failed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'pri.indexing.index_failed_due_to_version_conflict' | 'merges.current' | 'mc' | 'mergesCurrent' | 'pri.merges.current' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'pri.merges.current_docs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'pri.merges.current_size' | 'merges.total' | 'mt' | 'mergesTotal' | 'pri.merges.total' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'pri.merges.total_docs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'pri.merges.total_size' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'pri.merges.total_time' | 'refresh.total' | 'rto' | 'refreshTotal' | 'pri.refresh.total' | 'refresh.time' | 'rti' | 'refreshTime' | 'pri.refresh.time' | 'refresh.external_total' | 'rto' | 'refreshTotal' | 'pri.refresh.external_total' | 'refresh.external_time' | 'rti' | 'refreshTime' | 'pri.refresh.external_time' | 'refresh.listeners' | 'rli' | 'refreshListeners' | 'pri.refresh.listeners' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'pri.search.fetch_current' | 
'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'pri.search.fetch_time' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'pri.search.fetch_total' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'pri.search.open_contexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'pri.search.query_current' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'pri.search.query_time' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'pri.search.query_total' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'pri.search.scroll_current' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'pri.search.scroll_time' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'pri.search.scroll_total' | 'segments.count' | 'sc' | 'segmentsCount' | 'pri.segments.count' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'pri.segments.memory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'pri.segments.index_writer_memory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'pri.segments.version_map_memory' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'pri.segments.fixed_bitset_memory' | 'warmer.current' | 'wc' | 'warmerCurrent' | 'pri.warmer.current' | 'warmer.total' | 'wto' | 'warmerTotal' | 'pri.warmer.total' | 'warmer.total_time' | 'wtt' | 'warmerTotalTime' | 'pri.warmer.total_time' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'pri.suggest.current' | 'suggest.time' | 'suti' | 'suggestTime' | 'pri.suggest.time' | 'suggest.total' | 'suto' | 'suggestTotal' | 'pri.suggest.total' | 'memory.total' | 'tm' | 'memoryTotal' | 'pri.memory.total' | 'bulk.total_operations' | 'bto' | 'bulkTotalOperation' | 'pri.bulk.total_operations' | 'bulk.total_time' | 'btti' | 'bulkTotalTime' | 'pri.bulk.total_time' | 'bulk.total_size_in_bytes' | 'btsi' | 'bulkTotalSizeInBytes' | 'pri.bulk.total_size_in_bytes' | 'bulk.avg_time' | 'bati' | 'bulkAvgTime' | 'pri.bulk.avg_time' | 'bulk.avg_size_in_bytes' | 'basi' | 'bulkAvgSizeInBytes' | 'pri.bulk.avg_size_in_bytes' | 'dense_vector.value_count' | 'dvc' | 'denseVectorCount' | 'pri.dense_vector.value_count' | 'sparse_vector.value_count' | 'svc' | 'sparseVectorCount' | 'pri.sparse_vector.value_count' | string + +export type CatCatIndicesColumns = CatCatIndicesColumn | CatCatIndicesColumn[] + +export type CatCatMasterColumn = 'id' | 'host' | 'h' | 'ip' | 'node' | 'n' | string + +export type CatCatMasterColumns = CatCatMasterColumn | CatCatMasterColumn[] + +export type CatCatNodeColumn = 'build' | 'b' | 'completion.size' | 'cs' | 'completionSize' | 'cpu' | 'disk.avail' | 'd' | 'disk' | 'diskAvail' | 'disk.total' | 'dt' | 'diskTotal' | 'disk.used' | 'du' | 'diskUsed' | 'disk.used_percent' | 'dup' | 'diskUsedPercent' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'file_desc.current' | 'fdc' | 'fileDescriptorCurrent' | 'file_desc.max' | 'fdm' | 'fileDescriptorMax' | 'file_desc.percent' | 'fdp' | 'fileDescriptorPercent' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'heap.current' | 'hc' | 'heapCurrent' | 'heap.max' | 'hm' | 'heapMax' | 'heap.percent' | 'hp' | 'heapPercent' | 'http_address' | 
'http' | 'id' | 'nodeId' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'i' | 'jdk' | 'j' | 'load_1m' | 'l' | 'load_5m' | 'l' | 'load_15m' | 'l' | 'available_processors' | 'ap' | 'mappings.total_count' | 'mtc' | 'mappingsTotalCount' | 'mappings.total_estimated_overhead_in_bytes' | 'mteo' | 'mappingsTotalEstimatedOverheadInBytes' | 'master' | 'm' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'name' | 'n' | 'node.role' | 'r' | 'role' | 'nodeRole' | 'pid' | 'p' | 'port' | 'po' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.hit_count' | 'qchc' | 'queryCacheHitCount' | 'query_cache.miss_count' | 'qcmc' | 'queryCacheMissCount' | 'ram.current' | 'rc' | 'ramCurrent' | 'ram.max' | 'rm' | 'ramMax' | 'ram.percent' | 'rp' | 'ramPercent' | 'refresh.total' | 'rto' | 'refreshTotal' | 'refresh.time' | 'rti' | 'refreshTime' | 'request_cache.memory_size' | 'rcm' | 'requestCacheMemory' | 'request_cache.evictions' | 'rce' | 'requestCacheEvictions' | 'request_cache.hit_count' | 'rchc' | 'requestCacheHitCount' | 'request_cache.miss_count' | 'rcmc' | 'requestCacheMissCount' | 'script.compilations' | 'scrcc' | 'scriptCompilations' | 'script.cache_evictions' | 'scrce' | 'scriptCacheEvictions' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'shard_stats.total_count' | 'sstc' | 'shards' | 'shardStatsTotalCount' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'uptime' | 'u' | 'version' | 'v' | string + +export type CatCatNodeColumns = CatCatNodeColumn | CatCatNodeColumn[] + +export type CatCatNodeattrsColumn = 'node' | 'id' | 'id' | 'nodeId' | 'pid' | 'p' | 'host' | 'h' | 'ip' | 'i' | 'port' | 'po' | 'attr' | 'attr.name' | 'value' | 'attr.value' | string + +export type CatCatNodeattrsColumns = CatCatNodeattrsColumn | CatCatNodeattrsColumn[] + +export type CatCatPendingTasksColumn = 'insertOrder' | 'o' | 'timeInQueue' 
| 't' | 'priority' | 'p' | 'source' | 's' | string + +export type CatCatPendingTasksColumns = CatCatPendingTasksColumn | CatCatPendingTasksColumn[] + +export type CatCatPluginsColumn = 'id' | 'name' | 'n' | 'component' | 'c' | 'version' | 'v' | 'description' | 'd' | string + +export type CatCatPluginsColumns = CatCatPluginsColumn | CatCatPluginsColumn[] + +export type CatCatRecoveryColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'start_time' | 'start' | 'start_time_millis' | 'start_millis' | 'stop_time' | 'stop' | 'stop_time_millis' | 'stop_millis' | 'time' | 't' | 'ti' | 'type' | 'ty' | 'stage' | 'st' | 'source_host' | 'shost' | 'source_node' | 'snode' | 'target_host' | 'thost' | 'target_node' | 'tnode' | 'repository' | 'rep' | 'snapshot' | 'snap' | 'files' | 'f' | 'files_recovered' | 'fr' | 'files_percent' | 'fp' | 'files_total' | 'tf' | 'bytes' | 'b' | 'bytes_recovered' | 'br' | 'bytes_percent' | 'bp' | 'bytes_total' | 'tb' | 'translog_ops' | 'to' | 'translog_ops_recovered' | 'tor' | 'translog_ops_percent' | 'top' | string + +export type CatCatRecoveryColumns = CatCatRecoveryColumn | CatCatRecoveryColumn[] + +export interface CatCatRequestBase extends RequestBase, SpecUtilsCommonCatQueryParameters { +} + +export type CatCatSegmentsColumn = 'index' | 'i' | 'idx' | 'shard' | 's' | 'sh' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'ip' | 'segment' | 'generation' | 'docs.count' | 'docs.deleted' | 'size' | 'size.memory' | 'committed' | 'searchable' | 'version' | 'compound' | 'id' | string + +export type CatCatSegmentsColumns = CatCatSegmentsColumn | CatCatSegmentsColumn[] + +export type CatCatShardColumn = 'completion.size' | 'cs' | 'completionSize' | 'dataset.size' | 'dense_vector.value_count' | 'dvc' | 'denseVectorCount' | 'docs' | 'd' | 'dc' | 'fielddata.evictions' | 'fe' | 'fielddataEvictions' | 'fielddata.memory_size' | 'fm' | 'fielddataMemory' | 'flush.total' | 'ft' | 'flushTotal' | 'flush.total_time' | 'ftt' | 'flushTotalTime' | 'get.current' | 'gc' | 'getCurrent' | 'get.exists_time' | 'geti' | 'getExistsTime' | 'get.exists_total' | 'geto' | 'getExistsTotal' | 'get.missing_time' | 'gmti' | 'getMissingTime' | 'get.missing_total' | 'gmto' | 'getMissingTotal' | 'get.time' | 'gti' | 'getTime' | 'get.total' | 'gto' | 'getTotal' | 'id' | 'index' | 'i' | 'idx' | 'indexing.delete_current' | 'idc' | 'indexingDeleteCurrent' | 'indexing.delete_time' | 'idti' | 'indexingDeleteTime' | 'indexing.delete_total' | 'idto' | 'indexingDeleteTotal' | 'indexing.index_current' | 'iic' | 'indexingIndexCurrent' | 'indexing.index_failed_due_to_version_conflict' | 'iifvc' | 'indexingIndexFailedDueToVersionConflict' | 'indexing.index_failed' | 'iif' | 'indexingIndexFailed' | 'indexing.index_time' | 'iiti' | 'indexingIndexTime' | 'indexing.index_total' | 'iito' | 'indexingIndexTotal' | 'ip' | 'merges.current' | 'mc' | 'mergesCurrent' | 'merges.current_docs' | 'mcd' | 'mergesCurrentDocs' | 'merges.current_size' | 'mcs' | 'mergesCurrentSize' | 'merges.total' | 'mt' | 'mergesTotal' | 'merges.total_docs' | 'mtd' | 'mergesTotalDocs' | 'merges.total_size' | 'mts' | 'mergesTotalSize' | 'merges.total_time' | 'mtt' | 'mergesTotalTime' | 'node' | 'n' | 'prirep' | 'p' | 'pr' | 'primaryOrReplica' | 'query_cache.evictions' | 'qce' | 'queryCacheEvictions' | 'query_cache.memory_size' | 'qcm' | 'queryCacheMemory' | 'recoverysource.type' | 'rs' | 'refresh.time' | 'rti' | 'refreshTime' | 'refresh.total' | 'rto' | 'refreshTotal' | 'search.fetch_current' | 'sfc' | 'searchFetchCurrent' | 'search.fetch_time' | 'sfti' | 
'searchFetchTime' | 'search.fetch_total' | 'sfto' | 'searchFetchTotal' | 'search.open_contexts' | 'so' | 'searchOpenContexts' | 'search.query_current' | 'sqc' | 'searchQueryCurrent' | 'search.query_time' | 'sqti' | 'searchQueryTime' | 'search.query_total' | 'sqto' | 'searchQueryTotal' | 'search.scroll_current' | 'scc' | 'searchScrollCurrent' | 'search.scroll_time' | 'scti' | 'searchScrollTime' | 'search.scroll_total' | 'scto' | 'searchScrollTotal' | 'segments.count' | 'sc' | 'segmentsCount' | 'segments.fixed_bitset_memory' | 'sfbm' | 'fixedBitsetMemory' | 'segments.index_writer_memory' | 'siwm' | 'segmentsIndexWriterMemory' | 'segments.memory' | 'sm' | 'segmentsMemory' | 'segments.version_map_memory' | 'svmm' | 'segmentsVersionMapMemory' | 'seq_no.global_checkpoint' | 'sqg' | 'globalCheckpoint' | 'seq_no.local_checkpoint' | 'sql' | 'localCheckpoint' | 'seq_no.max' | 'sqm' | 'maxSeqNo' | 'shard' | 's' | 'sh' | 'dsparse_vector.value_count' | 'svc' | 'sparseVectorCount' | 'state' | 'st' | 'store' | 'sto' | 'suggest.current' | 'suc' | 'suggestCurrent' | 'suggest.time' | 'suti' | 'suggestTime' | 'suggest.total' | 'suto' | 'suggestTotal' | 'sync_id' | 'unassigned.at' | 'ua' | 'unassigned.details' | 'ud' | 'unassigned.for' | 'uf' | 'unassigned.reason' | 'ur' | string + +export type CatCatShardColumns = CatCatShardColumn | CatCatShardColumn[] + +export type CatCatSnapshotsColumn = 'id' | 'snapshot' | 'repository' | 're' | 'repo' | 'status' | 's' | 'start_epoch' | 'ste' | 'startEpoch' | 'start_time' | 'sti' | 'startTime' | 'end_epoch' | 'ete' | 'endEpoch' | 'end_time' | 'eti' | 'endTime' | 'duration' | 'dur' | 'indices' | 'i' | 'successful_shards' | 'ss' | 'failed_shards' | 'fs' | 'total_shards' | 'ts' | 'reason' | 'r' | string + +export type CatCatSnapshotsColumns = CatCatSnapshotsColumn | CatCatSnapshotsColumn[] + +export type CatCatTasksColumn = 'id' | 'action' | 'ac' | 'task_id' | 'ti' | 'parent_task_id' | 'pti' | 'type' | 'ty' | 'start_time' | 'start' | 'timestamp' | 'ts' | 'hms' | 'hhmmss' | 'running_time_ns' | 'time' | 'running_time' | 'time' | 'node_id' | 'ni' | 'ip' | 'i' | 'port' | 'po' | 'node' | 'n' | 'version' | 'v' | 'x_opaque_id' | 'x' | string + +export type CatCatTasksColumns = CatCatTasksColumn | CatCatTasksColumn[] + +export type CatCatTemplatesColumn = 'name' | 'n' | 'index_patterns' | 't' | 'order' | 'o' | 'p' | 'version' | 'v' | 'composed_of' | 'c' | string + +export type CatCatTemplatesColumns = CatCatTemplatesColumn | CatCatTemplatesColumn[] + +export type CatCatThreadPoolColumn = 'active' | 'a' | 'completed' | 'c' | 'core' | 'cr' | 'ephemeral_id' | 'eid' | 'host' | 'h' | 'ip' | 'i' | 'keep_alive' | 'k' | 'largest' | 'l' | 'max' | 'mx' | 'name' | 'node_id' | 'id' | 'node_name' | 'pid' | 'p' | 'pool_size' | 'psz' | 'port' | 'po' | 'queue' | 'q' | 'queue_size' | 'qs' | 'rejected' | 'r' | 'size' | 'sz' | 'type' | 't' | string + +export type CatCatThreadPoolColumns = CatCatThreadPoolColumn | CatCatThreadPoolColumn[] + +export type CatCatTrainedModelsColumn = 'create_time' | 'ct' | 'created_by' | 'c' | 'createdBy' | 'data_frame_analytics_id' | 'df' | 'dataFrameAnalytics' | 'dfid' | 'description' | 'd' | 'heap_size' | 'hs' | 'modelHeapSize' | 'id' | 'ingest.count' | 'ic' | 'ingestCount' | 'ingest.current' | 'icurr' | 'ingestCurrent' | 'ingest.failed' | 'if' | 'ingestFailed' | 'ingest.pipelines' | 'ip' | 'ingestPipelines' | 'ingest.time' | 'it' | 'ingestTime' | 'license' | 'l' | 'operations' | 'o' | 'modelOperations' | 'version' | 'v' + +export type CatCatTrainedModelsColumns = 
CatCatTrainedModelsColumn | CatCatTrainedModelsColumn[] + +export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'checkpoint' | 'cp' | 'checkpoint_duration_time_exp_avg' | 'cdtea' | 'checkpointTimeExpAvg' | 'checkpoint_progress' | 'c' | 'checkpointProgress' | 'create_time' | 'ct' | 'createTime' | 'delete_time' | 'dtime' | 'description' | 'd' | 'dest_index' | 'di' | 'destIndex' | 'documents_deleted' | 'docd' | 'documents_indexed' | 'doci' | 'docs_per_second' | 'dps' | 'documents_processed' | 'docp' | 'frequency' | 'f' | 'id' | 'index_failure' | 'if' | 'index_time' | 'itime' | 'index_total' | 'it' | 'indexed_documents_exp_avg' | 'idea' | 'last_search_time' | 'lst' | 'lastSearchTime' | 'max_page_search_size' | 'mpsz' | 'pages_processed' | 'pp' | 'pipeline' | 'p' | 'processed_documents_exp_avg' | 'pdea' | 'processing_time' | 'pt' | 'reason' | 'r' | 'search_failure' | 'sf' | 'search_time' | 'stime' | 'search_total' | 'st' | 'source_index' | 'si' | 'sourceIndex' | 'state' | 's' | 'transform_type' | 'tt' | 'trigger_count' | 'tc' | 'version' | 'v' + +export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[] + +export interface CatAliasesAliasesRecord { + /** alias name */ + alias?: string + /** alias name + * @alias alias */ + a?: string + /** index alias points to */ + index?: IndexName + /** index alias points to + * @alias index */ + i?: IndexName + /** index alias points to + * @alias index */ + idx?: IndexName + /** filter */ + filter?: string + /** filter + * @alias filter */ + f?: string + /** filter + * @alias filter */ + fi?: string + /** index routing */ + 'routing.index'?: string + /** index routing + * @alias 'routing.index' */ + ri?: string + /** index routing + * @alias 'routing.index' */ + routingIndex?: string + /** search routing */ + 'routing.search'?: string + /** search routing + * @alias 'routing.search' */ + rs?: string + /** search routing + * @alias 'routing.search' */ + routingSearch?: string + /** write index */ + is_write_index?: string + /** write index + * @alias is_write_index */ + w?: string + /** write index + * @alias is_write_index */ + isWriteIndex?: string +}
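Each field of `CatAliasesAliasesRecord` carries short and camelCase aliases matching the `h` column names accepted by the request defined next. A sketch of selecting columns and reading the typed rows (node URL is a placeholder):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

async function listAliases (): Promise<void> {
  // Ask only for the alias and index columns, sorted by alias name;
  // format: 'json' yields an array of records instead of text columns.
  const rows = await client.cat.aliases({
    format: 'json',
    h: ['alias', 'index'],
    s: ['alias:asc']
  })
  for (const row of rows) {
    console.log(row.alias, row.index)
  }
}

listAliases().catch(console.error)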
export interface CatAliasesRequest extends CatCatRequestBase { + /** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ + name?: Names + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatAliasesColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never time out, you can set it to `-1`. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, expand_wildcards?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, h?: never, s?: never, expand_wildcards?: never, master_timeout?: never } +} + +export type CatAliasesResponse = CatAliasesAliasesRecord[] + +export interface CatAllocationAllocationRecord { + /** Number of primary and replica shards assigned to the node. */ + shards?: string + /** Number of primary and replica shards assigned to the node. + * @alias shards */ + s?: string + /** Number of shards that are scheduled to be moved elsewhere in the cluster, or -1 if an allocator other than the desired balance allocator is used */ + 'shards.undesired'?: string | null + /** Sum of index write load forecasts */ + 'write_load.forecast'?: SpecUtilsStringified<double> | null + /** Sum of index write load forecasts + * @alias 'write_load.forecast' */ + wlf?: SpecUtilsStringified<double> | null + /** Sum of index write load forecasts + * @alias 'write_load.forecast' */ + writeLoadForecast?: SpecUtilsStringified<double> | null + /** Sum of shard size forecasts */ + 'disk.indices.forecast'?: ByteSize | null + /** Sum of shard size forecasts + * @alias 'disk.indices.forecast' */ + dif?: ByteSize | null + /** Sum of shard size forecasts + * @alias 'disk.indices.forecast' */ + diskIndicesForecast?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. */ + 'disk.indices'?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. + * @alias 'disk.indices' */ + di?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. + * @alias 'disk.indices' */ + diskIndices?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. */ + 'disk.used'?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. + * @alias 'disk.used' */ + du?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node.
+ * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. + * @alias 'disk.used' */ + diskUsed?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. */ + 'disk.avail'?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. + * @alias 'disk.avail' */ + da?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. + * @alias 'disk.avail' */ + diskAvail?: ByteSize | null + /** Total disk space for the node, including in-use and available space. */ + 'disk.total'?: ByteSize | null + /** Total disk space for the node, including in-use and available space. + * @alias 'disk.total' */ + dt?: ByteSize | null + /** Total disk space for the node, including in-use and available space. + * @alias 'disk.total' */ + diskTotal?: ByteSize | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. */ + 'disk.percent'?: Percentage | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. + * @alias 'disk.percent' */ + dp?: Percentage | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. + * @alias 'disk.percent' */ + diskPercent?: Percentage | null + /** Network host for the node. Set using the `network.host` setting. */ + host?: Host | null + /** Network host for the node. Set using the `network.host` setting. + * @alias host */ + h?: Host | null + /** IP address and port for the node. */ + ip?: Ip | null + /** Name for the node. Set using the `node.name` setting. */ + node?: string + /** Name for the node. Set using the `node.name` setting. + * @alias node */ + n?: string + /** Node roles */ + 'node.role'?: string | null + /** Node roles + * @alias 'node.role' */ + r?: string | null + /** Node roles + * @alias 'node.role' */ + role?: string | null + /** Node roles + * @alias 'node.role' */ + nodeRole?: string | null +} + +export interface CatAllocationRequest extends CatCatRequestBase { + /** A comma-separated list of node identifiers or names used to limit the returned information. */ + node_id?: NodeIds + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatAllocationColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false`, the list of selected nodes is computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ + local?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, h?: never, s?: never, local?: never, master_timeout?: never } +} + +export type CatAllocationResponse = CatAllocationAllocationRecord[]
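The allocation record types its disk columns as `ByteSize | null`, so consumers should null-check before formatting. A hedged sketch reusing the `client` from the aliases example above:

async function diskUsageByNode (): Promise<void> {
  const rows: estypes.CatAllocationResponse = await client.cat.allocation({
    h: ['node', 'shards', 'disk.used', 'disk.percent']
  })
  for (const row of rows) {
    // Dotted column names become quoted keys on the record type.
    console.log(row.node, row['disk.used'] ?? 'n/a', row['disk.percent'] ?? 'n/a')
  }
}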
export interface CatComponentTemplatesComponentTemplate { + name: string + version: string | null + alias_count: string + mapping_count: string + settings_count: string + metadata_count: string + included_in: string +} + +export interface CatComponentTemplatesRequest extends CatCatRequestBase { + /** The name of the component template. + * It accepts wildcard expressions. + * If it is omitted, all component templates are returned. */ + name?: string + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatComponentColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false`, the list of selected nodes is computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ + local?: boolean + /** The period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } +} + +export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] + +export interface CatCountCountRecord { + /** seconds since 1970-01-01 00:00:00 */ + epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ + t?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ + time?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** time in HH:MM:SS */ + timestamp?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ + ts?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ + hms?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ + hhmmss?: TimeOfDay + /** the document count */ + count?: string + /** the document count + * @alias count */ + dc?: string + /** the document count + * @alias count */ + 'docs.count'?: string + /** the document count + * @alias count */ + docsCount?: string +} + +export interface CatCountRequest extends CatCatRequestBase { + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * It supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatCountColumns + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded).
+ * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, h?: never, project_routing?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, h?: never, project_routing?: never, s?: never } +} + +export type CatCountResponse = CatCountCountRecord[] + +export interface CatFielddataFielddataRecord { + /** node id */ + id?: string + /** host name */ + host?: string + /** host name + * @alias host */ + h?: string + /** ip address */ + ip?: string + /** node name */ + node?: string + /** node name + * @alias node */ + n?: string + /** field name */ + field?: string + /** field name + * @alias field */ + f?: string + /** field data usage */ + size?: string +} + +export interface CatFielddataRequest extends CatCatRequestBase { + /** Comma-separated list of fields used to limit returned information. + * To retrieve all fields, omit this parameter. */ + fields?: Fields + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatFieldDataColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { fields?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { fields?: never, h?: never, s?: never } +} + +export type CatFielddataResponse = CatFielddataFielddataRecord[]
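A corresponding sketch for the fielddata types; `my_text_field` is a hypothetical field name and the `client` is again assumed from the earlier example:

async function fielddataUsage (): Promise<void> {
  const rows: estypes.CatFielddataResponse = await client.cat.fielddata({
    fields: 'my_text_field',
    h: ['node', 'field', 'size']
  })
  for (const row of rows) {
    console.log(`${row.node}: ${row.field ?? '?'} uses ${row.size ?? '0'}`)
  }
}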
export interface CatHealthHealthRecord { + /** seconds since 1970-01-01 00:00:00 */ + epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ + time?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** time in HH:MM:SS */ + timestamp?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ + ts?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ + hms?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ + hhmmss?: TimeOfDay + /** cluster name */ + cluster?: string + /** cluster name + * @alias cluster */ + cl?: string + /** health status */ + status?: string + /** health status + * @alias status */ + st?: string + /** total number of nodes */ + 'node.total'?: string + /** total number of nodes + * @alias 'node.total' */ + nt?: string + /** total number of nodes + * @alias 'node.total' */ + nodeTotal?: string + /** number of nodes that can store data */ + 'node.data'?: string + /** number of nodes that can store data + * @alias 'node.data' */ + nd?: string + /** number of nodes that can store data + * @alias 'node.data' */ + nodeData?: string + /** total number of shards */ + shards?: string + /** total number of shards + * @alias shards */ + t?: string + /** total number of shards + * @alias shards */ + sh?: string + /** total number of shards + * @alias shards */ + 'shards.total'?: string + /** total number of shards + * @alias shards */ + shardsTotal?: string + /** number of primary shards */ + pri?: string + /** number of primary shards + * @alias pri */ + p?: string + /** number of primary shards + * @alias pri */ + 'shards.primary'?: string + /** number of primary shards + * @alias pri */ + shardsPrimary?: string + /** number of relocating shards */ + relo?: string + /** number of relocating shards + * @alias relo */ + r?: string + /** number of relocating shards + * @alias relo */ + 'shards.relocating'?: string + /** number of relocating shards + * @alias relo */ + shardsRelocating?: string + /** number of initializing shards */ + init?: string + /** number of initializing shards + * @alias init */ + i?: string + /** number of initializing shards + * @alias init */ + 'shards.initializing'?: string + /** number of initializing shards + * @alias init */ + shardsInitializing?: string + /** number of unassigned primary shards */ + 'unassign.pri'?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ + up?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ + 'shards.unassigned.primary'?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ + shardsUnassignedPrimary?: string + /** number of unassigned shards */ + unassign?: string + /** number of unassigned shards + * @alias unassign */ + u?: string + /** number of unassigned shards + * @alias unassign */ + 'shards.unassigned'?: string + /** number of unassigned shards + * @alias unassign */ + shardsUnassigned?: string + /** number of pending tasks */ + pending_tasks?: string + /** number of pending tasks + * @alias pending_tasks */ + pt?: string + /** number of pending tasks + * @alias pending_tasks */ + pendingTasks?: string + /** wait time of longest task pending */ + max_task_wait_time?: string + /** wait time of longest task pending + * @alias max_task_wait_time */ + mtwt?: string + /** wait time of longest task pending + * @alias max_task_wait_time */ + maxTaskWaitTime?: string
+ /** active number of shards in percent */ + active_shards_percent?: string + /** active number of shards in percent + * @alias active_shards_percent */ + asp?: string + /** active number of shards in percent + * @alias active_shards_percent */ + activeShardsPercent?: string +} + +export interface CatHealthRequest extends CatCatRequestBase { + /** If true, returns `HH:MM:SS` and Unix epoch timestamps. */ + ts?: boolean + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatHealthColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ts?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ts?: never, h?: never, s?: never } +} + +export type CatHealthResponse = CatHealthHealthRecord[] + +export interface CatHelpRequest { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface CatHelpResponse { +} + +export interface CatIndicesIndicesRecord { + /** current health status */ + health?: string + /** current health status + * @alias health */ + h?: string + /** open/close status */ + status?: string + /** open/close status + * @alias status */ + s?: string + /** index name */ + index?: string + /** index name + * @alias index */ + i?: string + /** index name + * @alias index */ + idx?: string + /** index uuid */ + uuid?: string + /** index uuid + * @alias uuid */ + id?: string + /** number of primary shards */ + pri?: string + /** number of primary shards + * @alias pri */ + p?: string + /** number of primary shards + * @alias pri */ + 'shards.primary'?: string + /** number of primary shards + * @alias pri */ + shardsPrimary?: string + /** number of replica shards */ + rep?: string + /** number of replica shards + * @alias rep */ + r?: string + /** number of replica shards + * @alias rep */ + 'shards.replica'?: string + /** number of replica shards + * @alias rep */ + shardsReplica?: string + /** The number of documents in the index, including hidden nested documents. + * For indices with `semantic_text` fields or other nested field types, + * this count includes the internal nested documents. + * To get the logical document count (excluding nested documents), use + * the `_count` API or `_cat/count` API instead. */ + 'docs.count'?: string | null + /** The number of documents in the index, including hidden nested documents. + * For indices with `semantic_text` fields or other nested field types, + * this count includes the internal nested documents. + * To get the logical document count (excluding nested documents), use + * the `_count` API or `_cat/count` API instead. + * @alias 'docs.count' */ + dc?: string | null + /** The number of documents in the index, including hidden nested documents. + * For indices with `semantic_text` fields or other nested field types, + * this count includes the internal nested documents. + * To get the logical document count (excluding nested documents), use + * the `_count` API or `_cat/count` API instead.
+ * @alias 'docs.count' */ + docsCount?: string | null + /** deleted docs */ + 'docs.deleted'?: string | null + /** deleted docs + * @alias 'docs.deleted' */ + dd?: string | null + /** deleted docs + * @alias 'docs.deleted' */ + docsDeleted?: string | null + /** index creation date (millisecond value) */ + 'creation.date'?: string + /** index creation date (millisecond value) + * @alias 'creation.date' */ + cd?: string + /** index creation date (as string) */ + 'creation.date.string'?: string + /** index creation date (as string) + * @alias 'creation.date.string' */ + cds?: string + /** store size of primaries & replicas */ + 'store.size'?: string | null + /** store size of primaries & replicas + * @alias 'store.size' */ + ss?: string | null + /** store size of primaries & replicas + * @alias 'store.size' */ + storeSize?: string | null + /** store size of primaries */ + 'pri.store.size'?: string | null + /** total size of dataset (including the cache for partially mounted indices) */ + 'dataset.size'?: string | null + /** size of completion */ + 'completion.size'?: string + /** size of completion + * @alias 'completion.size' */ + cs?: string + /** size of completion + * @alias 'completion.size' */ + completionSize?: string + /** size of completion */ + 'pri.completion.size'?: string + /** used fielddata cache */ + 'fielddata.memory_size'?: string + /** used fielddata cache + * @alias 'fielddata.memory_size' */ + fm?: string + /** used fielddata cache + * @alias 'fielddata.memory_size' */ + fielddataMemory?: string + /** used fielddata cache */ + 'pri.fielddata.memory_size'?: string + /** fielddata evictions */ + 'fielddata.evictions'?: string + /** fielddata evictions + * @alias 'fielddata.evictions' */ + fe?: string + /** fielddata evictions + * @alias 'fielddata.evictions' */ + fielddataEvictions?: string + /** fielddata evictions */ + 'pri.fielddata.evictions'?: string + /** used query cache */ + 'query_cache.memory_size'?: string + /** used query cache + * @alias 'query_cache.memory_size' */ + qcm?: string + /** used query cache + * @alias 'query_cache.memory_size' */ + queryCacheMemory?: string + /** used query cache */ + 'pri.query_cache.memory_size'?: string + /** query cache evictions */ + 'query_cache.evictions'?: string + /** query cache evictions + * @alias 'query_cache.evictions' */ + qce?: string + /** query cache evictions + * @alias 'query_cache.evictions' */ + queryCacheEvictions?: string + /** query cache evictions */ + 'pri.query_cache.evictions'?: string + /** used request cache */ + 'request_cache.memory_size'?: string + /** used request cache + * @alias 'request_cache.memory_size' */ + rcm?: string + /** used request cache + * @alias 'request_cache.memory_size' */ + requestCacheMemory?: string + /** used request cache */ + 'pri.request_cache.memory_size'?: string + /** request cache evictions */ + 'request_cache.evictions'?: string + /** request cache evictions + * @alias 'request_cache.evictions' */ + rce?: string + /** request cache evictions + * @alias 'request_cache.evictions' */ + requestCacheEvictions?: string + /** request cache evictions */ + 'pri.request_cache.evictions'?: string + /** request cache hit count */ + 'request_cache.hit_count'?: string + /** request cache hit count + * @alias 'request_cache.hit_count' */ + rchc?: string + /** request cache hit count + * @alias 'request_cache.hit_count' */ + requestCacheHitCount?: string + /** request cache hit count */ + 'pri.request_cache.hit_count'?: string + /** request cache miss count */ + 
'request_cache.miss_count'?: string + /** request cache miss count + * @alias 'request_cache.miss_count' */ + rcmc?: string + /** request cache miss count + * @alias 'request_cache.miss_count' */ + requestCacheMissCount?: string + /** request cache miss count */ + 'pri.request_cache.miss_count'?: string + /** number of flushes */ + 'flush.total'?: string + /** number of flushes + * @alias 'flush.total' */ + ft?: string + /** number of flushes + * @alias 'flush.total' */ + flushTotal?: string + /** number of flushes */ + 'pri.flush.total'?: string + /** time spent in flush */ + 'flush.total_time'?: string + /** time spent in flush + * @alias 'flush.total_time' */ + ftt?: string + /** time spent in flush + * @alias 'flush.total_time' */ + flushTotalTime?: string + /** time spent in flush */ + 'pri.flush.total_time'?: string + /** number of current get ops */ + 'get.current'?: string + /** number of current get ops + * @alias 'get.current' */ + gc?: string + /** number of current get ops + * @alias 'get.current' */ + getCurrent?: string + /** number of current get ops */ + 'pri.get.current'?: string + /** time spent in get */ + 'get.time'?: string + /** time spent in get + * @alias 'get.time' */ + gti?: string + /** time spent in get + * @alias 'get.time' */ + getTime?: string + /** time spent in get */ + 'pri.get.time'?: string + /** number of get ops */ + 'get.total'?: string + /** number of get ops + * @alias 'get.total' */ + gto?: string + /** number of get ops + * @alias 'get.total' */ + getTotal?: string + /** number of get ops */ + 'pri.get.total'?: string + /** time spent in successful gets */ + 'get.exists_time'?: string + /** time spent in successful gets + * @alias 'get.exists_time' */ + geti?: string + /** time spent in successful gets + * @alias 'get.exists_time' */ + getExistsTime?: string + /** time spent in successful gets */ + 'pri.get.exists_time'?: string + /** number of successful gets */ + 'get.exists_total'?: string + /** number of successful gets + * @alias 'get.exists_total' */ + geto?: string + /** number of successful gets + * @alias 'get.exists_total' */ + getExistsTotal?: string + /** number of successful gets */ + 'pri.get.exists_total'?: string + /** time spent in failed gets */ + 'get.missing_time'?: string + /** time spent in failed gets + * @alias 'get.missing_time' */ + gmti?: string + /** time spent in failed gets + * @alias 'get.missing_time' */ + getMissingTime?: string + /** time spent in failed gets */ + 'pri.get.missing_time'?: string + /** number of failed gets */ + 'get.missing_total'?: string + /** number of failed gets + * @alias 'get.missing_total' */ + gmto?: string + /** number of failed gets + * @alias 'get.missing_total' */ + getMissingTotal?: string + /** number of failed gets */ + 'pri.get.missing_total'?: string + /** number of current deletions */ + 'indexing.delete_current'?: string + /** number of current deletions + * @alias 'indexing.delete_current' */ + idc?: string + /** number of current deletions + * @alias 'indexing.delete_current' */ + indexingDeleteCurrent?: string + /** number of current deletions */ + 'pri.indexing.delete_current'?: string + /** time spent in deletions */ + 'indexing.delete_time'?: string + /** time spent in deletions + * @alias 'indexing.delete_time' */ + idti?: string + /** time spent in deletions + * @alias 'indexing.delete_time' */ + indexingDeleteTime?: string + /** time spent in deletions */ + 'pri.indexing.delete_time'?: string + /** number of delete ops */ + 'indexing.delete_total'?: string + /** number 
of delete ops + * @alias 'indexing.delete_total' */ + idto?: string + /** number of delete ops + * @alias 'indexing.delete_total' */ + indexingDeleteTotal?: string + /** number of delete ops */ + 'pri.indexing.delete_total'?: string + /** number of current indexing ops */ + 'indexing.index_current'?: string + /** number of current indexing ops + * @alias 'indexing.index_current' */ + iic?: string + /** number of current indexing ops + * @alias 'indexing.index_current' */ + indexingIndexCurrent?: string + /** number of current indexing ops */ + 'pri.indexing.index_current'?: string + /** time spent in indexing */ + 'indexing.index_time'?: string + /** time spent in indexing + * @alias 'indexing.index_time' */ + iiti?: string + /** time spent in indexing + * @alias 'indexing.index_time' */ + indexingIndexTime?: string + /** time spent in indexing */ + 'pri.indexing.index_time'?: string + /** number of indexing ops */ + 'indexing.index_total'?: string + /** number of indexing ops + * @alias 'indexing.index_total' */ + iito?: string + /** number of indexing ops + * @alias 'indexing.index_total' */ + indexingIndexTotal?: string + /** number of indexing ops */ + 'pri.indexing.index_total'?: string + /** number of failed indexing ops */ + 'indexing.index_failed'?: string + /** number of failed indexing ops + * @alias 'indexing.index_failed' */ + iif?: string + /** number of failed indexing ops + * @alias 'indexing.index_failed' */ + indexingIndexFailed?: string + /** number of failed indexing ops */ + 'pri.indexing.index_failed'?: string + /** number of current merges */ + 'merges.current'?: string + /** number of current merges + * @alias 'merges.current' */ + mc?: string + /** number of current merges + * @alias 'merges.current' */ + mergesCurrent?: string + /** number of current merges */ + 'pri.merges.current'?: string + /** number of current merging docs */ + 'merges.current_docs'?: string + /** number of current merging docs + * @alias 'merges.current_docs' */ + mcd?: string + /** number of current merging docs + * @alias 'merges.current_docs' */ + mergesCurrentDocs?: string + /** number of current merging docs */ + 'pri.merges.current_docs'?: string + /** size of current merges */ + 'merges.current_size'?: string + /** size of current merges + * @alias 'merges.current_size' */ + mcs?: string + /** size of current merges + * @alias 'merges.current_size' */ + mergesCurrentSize?: string + /** size of current merges */ + 'pri.merges.current_size'?: string + /** number of completed merge ops */ + 'merges.total'?: string + /** number of completed merge ops + * @alias 'merges.total' */ + mt?: string + /** number of completed merge ops + * @alias 'merges.total' */ + mergesTotal?: string + /** number of completed merge ops */ + 'pri.merges.total'?: string + /** docs merged */ + 'merges.total_docs'?: string + /** docs merged + * @alias 'merges.total_docs' */ + mtd?: string + /** docs merged + * @alias 'merges.total_docs' */ + mergesTotalDocs?: string + /** docs merged */ + 'pri.merges.total_docs'?: string + /** size merged */ + 'merges.total_size'?: string + /** size merged + * @alias 'merges.total_size' */ + mts?: string + /** size merged + * @alias 'merges.total_size' */ + mergesTotalSize?: string + /** size merged */ + 'pri.merges.total_size'?: string + /** time spent in merges */ + 'merges.total_time'?: string + /** time spent in merges + * @alias 'merges.total_time' */ + mtt?: string + /** time spent in merges + * @alias 'merges.total_time' */ + mergesTotalTime?: string + /** time spent in 
merges */ + 'pri.merges.total_time'?: string + /** total refreshes */ + 'refresh.total'?: string + /** total refreshes + * @alias 'refresh.total' */ + rto?: string + /** total refreshes + * @alias 'refresh.total' */ + refreshTotal?: string + /** total refreshes */ + 'pri.refresh.total'?: string + /** time spent in refreshes */ + 'refresh.time'?: string + /** time spent in refreshes + * @alias 'refresh.time' */ + rti?: string + /** time spent in refreshes + * @alias 'refresh.time' */ + refreshTime?: string + /** time spent in refreshes */ + 'pri.refresh.time'?: string + /** total external refreshes */ + 'refresh.external_total'?: string + /** total external refreshes + * @alias 'refresh.external_total' */ + reto?: string + /** total external refreshes */ + 'pri.refresh.external_total'?: string + /** time spent in external refreshes */ + 'refresh.external_time'?: string + /** time spent in external refreshes + * @alias 'refresh.external_time' */ + reti?: string + /** time spent in external refreshes */ + 'pri.refresh.external_time'?: string + /** number of pending refresh listeners */ + 'refresh.listeners'?: string + /** number of pending refresh listeners + * @alias 'refresh.listeners' */ + rli?: string + /** number of pending refresh listeners + * @alias 'refresh.listeners' */ + refreshListeners?: string + /** number of pending refresh listeners */ + 'pri.refresh.listeners'?: string + /** current fetch phase ops */ + 'search.fetch_current'?: string + /** current fetch phase ops + * @alias 'search.fetch_current' */ + sfc?: string + /** current fetch phase ops + * @alias 'search.fetch_current' */ + searchFetchCurrent?: string + /** current fetch phase ops */ + 'pri.search.fetch_current'?: string + /** time spent in fetch phase */ + 'search.fetch_time'?: string + /** time spent in fetch phase + * @alias 'search.fetch_time' */ + sfti?: string + /** time spent in fetch phase + * @alias 'search.fetch_time' */ + searchFetchTime?: string + /** time spent in fetch phase */ + 'pri.search.fetch_time'?: string + /** total fetch ops */ + 'search.fetch_total'?: string + /** total fetch ops + * @alias 'search.fetch_total' */ + sfto?: string + /** total fetch ops + * @alias 'search.fetch_total' */ + searchFetchTotal?: string + /** total fetch ops */ + 'pri.search.fetch_total'?: string + /** open search contexts */ + 'search.open_contexts'?: string + /** open search contexts + * @alias 'search.open_contexts' */ + so?: string + /** open search contexts + * @alias 'search.open_contexts' */ + searchOpenContexts?: string + /** open search contexts */ + 'pri.search.open_contexts'?: string + /** current query phase ops */ + 'search.query_current'?: string + /** current query phase ops + * @alias 'search.query_current' */ + sqc?: string + /** current query phase ops + * @alias 'search.query_current' */ + searchQueryCurrent?: string + /** current query phase ops */ + 'pri.search.query_current'?: string + /** time spent in query phase */ + 'search.query_time'?: string + /** time spent in query phase + * @alias 'search.query_time' */ + sqti?: string + /** time spent in query phase + * @alias 'search.query_time' */ + searchQueryTime?: string + /** time spent in query phase */ + 'pri.search.query_time'?: string + /** total query phase ops */ + 'search.query_total'?: string + /** total query phase ops + * @alias 'search.query_total' */ + sqto?: string + /** total query phase ops + * @alias 'search.query_total' */ + searchQueryTotal?: string + /** total query phase ops */ + 'pri.search.query_total'?: string + /** open 
scroll contexts */ + 'search.scroll_current'?: string + /** open scroll contexts + * @alias 'search.scroll_current' */ + scc?: string + /** open scroll contexts + * @alias 'search.scroll_current' */ + searchScrollCurrent?: string + /** open scroll contexts */ + 'pri.search.scroll_current'?: string + /** time scroll contexts held open */ + 'search.scroll_time'?: string + /** time scroll contexts held open + * @alias 'search.scroll_time' */ + scti?: string + /** time scroll contexts held open + * @alias 'search.scroll_time' */ + searchScrollTime?: string + /** time scroll contexts held open */ + 'pri.search.scroll_time'?: string + /** completed scroll contexts */ + 'search.scroll_total'?: string + /** completed scroll contexts + * @alias 'search.scroll_total' */ + scto?: string + /** completed scroll contexts + * @alias 'search.scroll_total' */ + searchScrollTotal?: string + /** completed scroll contexts */ + 'pri.search.scroll_total'?: string + /** number of segments */ + 'segments.count'?: string + /** number of segments + * @alias 'segments.count' */ + sc?: string + /** number of segments + * @alias 'segments.count' */ + segmentsCount?: string + /** number of segments */ + 'pri.segments.count'?: string + /** memory used by segments */ + 'segments.memory'?: string + /** memory used by segments + * @alias 'segments.memory' */ + sm?: string + /** memory used by segments + * @alias 'segments.memory' */ + segmentsMemory?: string + /** memory used by segments */ + 'pri.segments.memory'?: string + /** memory used by index writer */ + 'segments.index_writer_memory'?: string + /** memory used by index writer + * @alias 'segments.index_writer_memory' */ + siwm?: string + /** memory used by index writer + * @alias 'segments.index_writer_memory' */ + segmentsIndexWriterMemory?: string + /** memory used by index writer */ + 'pri.segments.index_writer_memory'?: string + /** memory used by version map */ + 'segments.version_map_memory'?: string + /** memory used by version map + * @alias 'segments.version_map_memory' */ + svmm?: string + /** memory used by version map + * @alias 'segments.version_map_memory' */ + segmentsVersionMapMemory?: string + /** memory used by version map */ + 'pri.segments.version_map_memory'?: string + /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields */ + 'segments.fixed_bitset_memory'?: string + /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields + * @alias 'segments.fixed_bitset_memory' */ + sfbm?: string + /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields + * @alias 'segments.fixed_bitset_memory' */ + fixedBitsetMemory?: string + /** memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields */ + 'pri.segments.fixed_bitset_memory'?: string + /** current warmer ops */ + 'warmer.current'?: string + /** current warmer ops + * @alias 'warmer.current' */ + wc?: string + /** current warmer ops + * @alias 'warmer.current' */ + warmerCurrent?: string + /** current warmer ops */ + 'pri.warmer.current'?: string + /** total warmer ops */ + 'warmer.total'?: string + /** total warmer ops + * @alias 'warmer.total' */ + wto?: string + /** total warmer ops + * @alias 'warmer.total' */ + warmerTotal?: string + /** total warmer ops */ + 'pri.warmer.total'?: string + /** time spent in warmers */ +
'warmer.total_time'?: string + /** time spent in warmers + * @alias 'warmer.total_time' */ + wtt?: string + /** time spent in warmers + * @alias 'warmer.total_time' */ + warmerTotalTime?: string + /** time spent in warmers */ + 'pri.warmer.total_time'?: string + /** number of current suggest ops */ + 'suggest.current'?: string + /** number of current suggest ops + * @alias 'suggest.current' */ + suc?: string + /** number of current suggest ops + * @alias 'suggest.current' */ + suggestCurrent?: string + /** number of current suggest ops */ + 'pri.suggest.current'?: string + /** time spent in suggest */ + 'suggest.time'?: string + /** time spent in suggest + * @alias 'suggest.time' */ + suti?: string + /** time spent in suggest + * @alias 'suggest.time' */ + suggestTime?: string + /** time spent in suggest */ + 'pri.suggest.time'?: string + /** number of suggest ops */ + 'suggest.total'?: string + /** number of suggest ops + * @alias 'suggest.total' */ + suto?: string + /** number of suggest ops + * @alias 'suggest.total' */ + suggestTotal?: string + /** number of suggest ops */ + 'pri.suggest.total'?: string + /** total used memory */ + 'memory.total'?: string + /** total used memory + * @alias 'memory.total' */ + tm?: string + /** total used memory + * @alias 'memory.total' */ + memoryTotal?: string + /** total used memory */ + 'pri.memory.total'?: string + /** indicates if the index is search throttled */ + 'search.throttled'?: string + /** indicates if the index is search throttled + * @alias 'search.throttled' */ + sth?: string + /** number of bulk shard ops */ + 'bulk.total_operations'?: string + /** number of bulk shard ops + * @alias 'bulk.total_operations' */ + bto?: string + /** number of bulk shard ops + * @alias 'bulk.total_operations' */ + bulkTotalOperation?: string + /** number of bulk shard ops */ + 'pri.bulk.total_operations'?: string + /** time spent in shard bulk */ + 'bulk.total_time'?: string + /** time spent in shard bulk + * @alias 'bulk.total_time' */ + btti?: string + /** time spent in shard bulk + * @alias 'bulk.total_time' */ + bulkTotalTime?: string + /** time spent in shard bulk */ + 'pri.bulk.total_time'?: string + /** total size in bytes of shard bulk */ + 'bulk.total_size_in_bytes'?: string + /** total size in bytes of shard bulk + * @alias 'bulk.total_size_in_bytes' */ + btsi?: string + /** total size in bytes of shard bulk + * @alias 'bulk.total_size_in_bytes' */ + bulkTotalSizeInBytes?: string + /** total size in bytes of shard bulk */ + 'pri.bulk.total_size_in_bytes'?: string + /** average time spent in shard bulk */ + 'bulk.avg_time'?: string + /** average time spent in shard bulk + * @alias 'bulk.avg_time' */ + bati?: string + /** average time spent in shard bulk + * @alias 'bulk.avg_time' */ + bulkAvgTime?: string + /** average time spent in shard bulk */ + 'pri.bulk.avg_time'?: string + /** average size in bytes of shard bulk */ + 'bulk.avg_size_in_bytes'?: string + /** average size in bytes of shard bulk + * @alias 'bulk.avg_size_in_bytes' */ + basi?: string + /** average size in bytes of shard bulk + * @alias 'bulk.avg_size_in_bytes' */ + bulkAvgSizeInBytes?: string + /** average size in bytes of shard bulk */ + 'pri.bulk.avg_size_in_bytes'?: string +} + +export interface CatIndicesRequest extends CatCatRequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
*/ + index?: Indices + /** The type of index that wildcard patterns can match. */ + expand_wildcards?: ExpandWildcards + /** The health status used to limit returned indices. By default, the response includes indices of any health status. */ + health?: HealthStatus + /** If true, the response includes information from segments that are not loaded into memory. */ + include_unloaded_segments?: boolean + /** If true, the response only includes information from primary shards. */ + pri?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatIndicesColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, master_timeout?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, master_timeout?: never, h?: never, s?: never } +} + +export type CatIndicesResponse = CatIndicesIndicesRecord[] + +export interface CatMasterMasterRecord { + /** node id */ + id?: string + /** host name */ + host?: string + /** host name + * @alias host */ + h?: string + /** ip address */ + ip?: string + /** node name */ + node?: string + /** node name + * @alias node */ + n?: string +} + +export interface CatMasterRequest extends CatCatRequestBase { + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatMasterColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false`, the list of selected nodes is computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ + local?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } +} + +export type CatMasterResponse = CatMasterMasterRecord[]
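The indices and master types follow the same record/request/response pattern. A sketch that filters `_cat/indices` output by health and sorts by name, again reusing the assumed `client`:

async function yellowIndices (): Promise<void> {
  const rows: estypes.CatIndicesResponse = await client.cat.indices({
    health: 'yellow',
    h: ['index', 'pri', 'rep', 'docs.count', 'store.size'],
    s: 'index:asc'
  })
  for (const row of rows) {
    console.log(row.index, row['docs.count'] ?? '0', row['store.size'] ?? '0b')
  }
}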
export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { + /** The identifier for the job. */ + id?: Id + /** The type of analysis that the job performs. */ + type?: string + /** The type of analysis that the job performs. + * @alias type */ + t?: string + /** The time when the job was created. */ + create_time?: string + /** The time when the job was created. + * @alias create_time */ + ct?: string + /** The time when the job was created. + * @alias create_time */ + createTime?: string + /** The version of Elasticsearch when the job was created. */ + version?: VersionString + /** The version of Elasticsearch when the job was created. + * @alias version */ + v?: VersionString + /** The name of the source index. */ + source_index?: IndexName + /** The name of the source index. + * @alias source_index */ + si?: IndexName + /** The name of the source index. + * @alias source_index */ + sourceIndex?: IndexName + /** The name of the destination index. */ + dest_index?: IndexName + /** The name of the destination index. + * @alias dest_index */ + di?: IndexName + /** The name of the destination index. + * @alias dest_index */ + destIndex?: IndexName + /** A description of the job. */ + description?: string + /** A description of the job. + * @alias description */ + d?: string + /** The approximate maximum amount of memory resources that are permitted for the job. */ + model_memory_limit?: string + /** The approximate maximum amount of memory resources that are permitted for the job. + * @alias model_memory_limit */ + mml?: string + /** The approximate maximum amount of memory resources that are permitted for the job. + * @alias model_memory_limit */ + modelMemoryLimit?: string + /** The current status of the job. */ + state?: string + /** The current status of the job. + * @alias state */ + s?: string + /** Messages about the reason why the job failed. */ + failure_reason?: string + /** Messages about the reason why the job failed. + * @alias failure_reason */ + fr?: string + /** Messages about the reason why the job failed. + * @alias failure_reason */ + failureReason?: string + /** The progress report for the job by phase. */ + progress?: string + /** The progress report for the job by phase. + * @alias progress */ + p?: string + /** Messages related to the selection of a node. */ + assignment_explanation?: string + /** Messages related to the selection of a node. + * @alias assignment_explanation */ + ae?: string + /** Messages related to the selection of a node. + * @alias assignment_explanation */ + assignmentExplanation?: string + /** The unique identifier of the assigned node. */ + 'node.id'?: Id + /** The unique identifier of the assigned node. + * @alias 'node.id' */ + ni?: Id + /** The unique identifier of the assigned node. + * @alias 'node.id' */ + nodeId?: Id + /** The name of the assigned node. */ + 'node.name'?: Name + /** The name of the assigned node. + * @alias 'node.name' */ + nn?: Name + /** The name of the assigned node. + * @alias 'node.name' */ + nodeName?: Name + /** The ephemeral identifier of the assigned node. */ + 'node.ephemeral_id'?: Id + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ + ne?: Id + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ + nodeEphemeralId?: Id + /** The network address of the assigned node. */ + 'node.address'?: string + /** The network address of the assigned node. + * @alias 'node.address' */ + na?: string + /** The network address of the assigned node. + * @alias 'node.address' */ + nodeAddress?: string +} + +export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { + /** The ID of the data frame analytics to fetch */ + id?: Id + /** Whether to ignore if a wildcard expression matches no configs. + * (This includes `_all` string or when no configs have been specified.) */ + allow_no_match?: boolean + /** Comma-separated list of column names to display.
*/ + h?: CatCatDfaColumns + /** Comma-separated list of column names or column aliases used to sort the + * response. */ + s?: CatCatDfaColumns + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, h?: never, s?: never } +} + +export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] + +export interface CatMlDatafeedsDatafeedsRecord { + /** The datafeed identifier. */ + id?: string + /** The status of the datafeed. */ + state?: MlDatafeedState + /** The status of the datafeed. + * @alias state */ + s?: MlDatafeedState + /** For started datafeeds only, contains messages relating to the selection of a node. */ + assignment_explanation?: string + /** For started datafeeds only, contains messages relating to the selection of a node. + * @alias assignment_explanation */ + ae?: string + /** The number of buckets processed. */ + 'buckets.count'?: string + /** The number of buckets processed. + * @alias 'buckets.count' */ + bc?: string + /** The number of buckets processed. + * @alias 'buckets.count' */ + bucketsCount?: string + /** The number of searches run by the datafeed. */ + 'search.count'?: string + /** The number of searches run by the datafeed. + * @alias 'search.count' */ + sc?: string + /** The number of searches run by the datafeed. + * @alias 'search.count' */ + searchCount?: string + /** The total time the datafeed spent searching, in milliseconds. */ + 'search.time'?: string + /** The total time the datafeed spent searching, in milliseconds. + * @alias 'search.time' */ + st?: string + /** The total time the datafeed spent searching, in milliseconds. + * @alias 'search.time' */ + searchTime?: string + /** The average search time per bucket, in milliseconds. */ + 'search.bucket_avg'?: string + /** The average search time per bucket, in milliseconds. + * @alias 'search.bucket_avg' */ + sba?: string + /** The average search time per bucket, in milliseconds. + * @alias 'search.bucket_avg' */ + searchBucketAvg?: string + /** The exponential average search time per hour, in milliseconds. */ + 'search.exp_avg_hour'?: string + /** The exponential average search time per hour, in milliseconds. + * @alias 'search.exp_avg_hour' */ + seah?: string + /** The exponential average search time per hour, in milliseconds. + * @alias 'search.exp_avg_hour' */ + searchExpAvgHour?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ + 'node.id'?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.id' */ + ni?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.id' */ + nodeId?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ + 'node.name'?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. 
+ * @alias 'node.name' */ + nn?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.name' */ + nodeName?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ + 'node.ephemeral_id'?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.ephemeral_id' */ + ne?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.ephemeral_id' */ + nodeEphemeralId?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ + 'node.address'?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.address' */ + na?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.address' */ + nodeAddress?: string +} + +export interface CatMlDatafeedsRequest extends CatCatRequestBase { + /** A numerical character string that uniquely identifies the datafeed. */ + datafeed_id?: Id + /** Specifies what to do when the request: + * + * * Contains wildcard expressions and there are no datafeeds that match. + * * Contains the `_all` string or no identifiers and there are no matches. + * * Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when + * there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only + * partial matches. */ + allow_no_match?: boolean + /** Comma-separated list of column names to display. */ + h?: CatCatDatafeedColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ + s?: CatCatDatafeedColumns + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, h?: never, s?: never } +} + +export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] + +export interface CatMlJobsJobsRecord { + /** The anomaly detection job identifier. */ + id?: Id + /** The status of the anomaly detection job. */ + state?: MlJobState + /** The status of the anomaly detection job. + * @alias state */ + s?: MlJobState + /** For open jobs only, the amount of time the job has been opened. */ + opened_time?: string + /** For open jobs only, the amount of time the job has been opened. + * @alias opened_time */ + ot?: string + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ + assignment_explanation?: string + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. 
+ * @alias assignment_explanation */ + ae?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. */ + 'data.processed_records'?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. + * @alias 'data.processed_records' */ + dpr?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. + * @alias 'data.processed_records' */ + dataProcessedRecords?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. */ + 'data.processed_fields'?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. + * @alias 'data.processed_fields' */ + dpf?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. + * @alias 'data.processed_fields' */ + dataProcessedFields?: string + /** The number of bytes of input data posted to the anomaly detection job. */ + 'data.input_bytes'?: ByteSize + /** The number of bytes of input data posted to the anomaly detection job. + * @alias 'data.input_bytes' */ + dib?: ByteSize + /** The number of bytes of input data posted to the anomaly detection job. + * @alias 'data.input_bytes' */ + dataInputBytes?: ByteSize + /** The number of input documents posted to the anomaly detection job. */ + 'data.input_records'?: string + /** The number of input documents posted to the anomaly detection job. + * @alias 'data.input_records' */ + dir?: string + /** The number of input documents posted to the anomaly detection job. + * @alias 'data.input_records' */ + dataInputRecords?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. */ + 'data.input_fields'?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. 
+ * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. + * @alias 'data.input_fields' */ + dif?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. + * @alias 'data.input_fields' */ + dataInputFields?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. */ + 'data.invalid_dates'?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. + * @alias 'data.invalid_dates' */ + did?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. + * @alias 'data.invalid_dates' */ + dataInvalidDates?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. */ + 'data.missing_fields'?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. + * @alias 'data.missing_fields' */ + dmf?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. + * @alias 'data.missing_fields' */ + dataMissingFields?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. */ + 'data.out_of_order_timestamps'?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. 
+ * @alias 'data.out_of_order_timestamps' */ + doot?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. + * @alias 'data.out_of_order_timestamps' */ + dataOutOfOrderTimestamps?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. */ + 'data.empty_buckets'?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. + * @alias 'data.empty_buckets' */ + deb?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. + * @alias 'data.empty_buckets' */ + dataEmptyBuckets?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. */ + 'data.sparse_buckets'?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. + * @alias 'data.sparse_buckets' */ + dsb?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. + * @alias 'data.sparse_buckets' */ + dataSparseBuckets?: string + /** The total number of buckets processed. */ + 'data.buckets'?: string + /** The total number of buckets processed. + * @alias 'data.buckets' */ + db?: string + /** The total number of buckets processed. + * @alias 'data.buckets' */ + dataBuckets?: string + /** The timestamp of the chronologically earliest input document. */ + 'data.earliest_record'?: string + /** The timestamp of the chronologically earliest input document. + * @alias 'data.earliest_record' */ + der?: string + /** The timestamp of the chronologically earliest input document. + * @alias 'data.earliest_record' */ + dataEarliestRecord?: string + /** The timestamp of the chronologically latest input document. */ + 'data.latest_record'?: string + /** The timestamp of the chronologically latest input document. + * @alias 'data.latest_record' */ + dlr?: string + /** The timestamp of the chronologically latest input document. + * @alias 'data.latest_record' */ + dataLatestRecord?: string + /** The timestamp at which data was last analyzed, according to server time. */ + 'data.last'?: string + /** The timestamp at which data was last analyzed, according to server time. + * @alias 'data.last' */ + dl?: string + /** The timestamp at which data was last analyzed, according to server time.
+ * @alias 'data.last' */ + dataLast?: string + /** The timestamp of the last bucket that did not contain any data. */ + 'data.last_empty_bucket'?: string + /** The timestamp of the last bucket that did not contain any data. + * @alias 'data.last_empty_bucket' */ + dleb?: string + /** The timestamp of the last bucket that did not contain any data. + * @alias 'data.last_empty_bucket' */ + dataLastEmptyBucket?: string + /** The timestamp of the last bucket that was considered sparse. */ + 'data.last_sparse_bucket'?: string + /** The timestamp of the last bucket that was considered sparse. + * @alias 'data.last_sparse_bucket' */ + dlsb?: string + /** The timestamp of the last bucket that was considered sparse. + * @alias 'data.last_sparse_bucket' */ + dataLastSparseBucket?: string + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. */ + 'model.bytes'?: ByteSize + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. + * @alias 'model.bytes' */ + mb?: ByteSize + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. + * @alias 'model.bytes' */ + modelBytes?: ByteSize + /** The status of the mathematical models. */ + 'model.memory_status'?: MlMemoryStatus + /** The status of the mathematical models. + * @alias 'model.memory_status' */ + mms?: MlMemoryStatus + /** The status of the mathematical models. + * @alias 'model.memory_status' */ + modelMemoryStatus?: MlMemoryStatus + /** The number of bytes over the high limit for memory usage at the last allocation failure. */ + 'model.bytes_exceeded'?: ByteSize + /** The number of bytes over the high limit for memory usage at the last allocation failure. + * @alias 'model.bytes_exceeded' */ + mbe?: ByteSize + /** The number of bytes over the high limit for memory usage at the last allocation failure. + * @alias 'model.bytes_exceeded' */ + modelBytesExceeded?: ByteSize + /** The upper limit for model memory usage, checked on increasing values. */ + 'model.memory_limit'?: string + /** The upper limit for model memory usage, checked on increasing values. + * @alias 'model.memory_limit' */ + mml?: string + /** The upper limit for model memory usage, checked on increasing values. + * @alias 'model.memory_limit' */ + modelMemoryLimit?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ + 'model.by_fields'?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.by_fields' */ + mbf?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.by_fields' */ + modelByFields?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ + 'model.over_fields'?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. 
+ * @alias 'model.over_fields' */ + mof?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.over_fields' */ + modelOverFields?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ + 'model.partition_fields'?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.partition_fields' */ + mpf?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.partition_fields' */ + modelPartitionFields?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. */ + 'model.bucket_allocation_failures'?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. + * @alias 'model.bucket_allocation_failures' */ + mbaf?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. + * @alias 'model.bucket_allocation_failures' */ + modelBucketAllocationFailures?: string + /** The status of categorization for the job. */ + 'model.categorization_status'?: MlCategorizationStatus + /** The status of categorization for the job. + * @alias 'model.categorization_status' */ + mcs?: MlCategorizationStatus + /** The status of categorization for the job. + * @alias 'model.categorization_status' */ + modelCategorizationStatus?: MlCategorizationStatus + /** The number of documents that have had a field categorized. */ + 'model.categorized_doc_count'?: string + /** The number of documents that have had a field categorized. + * @alias 'model.categorized_doc_count' */ + mcdc?: string + /** The number of documents that have had a field categorized. + * @alias 'model.categorized_doc_count' */ + modelCategorizedDocCount?: string + /** The number of categories created by categorization. */ + 'model.total_category_count'?: string + /** The number of categories created by categorization. + * @alias 'model.total_category_count' */ + mtcc?: string + /** The number of categories created by categorization. + * @alias 'model.total_category_count' */ + modelTotalCategoryCount?: string + /** The number of categories that match more than 1% of categorized documents. */ + 'model.frequent_category_count'?: string + /** The number of categories that match more than 1% of categorized documents. + * @alias 'model.frequent_category_count' */ + modelFrequentCategoryCount?: string + /** The number of categories that match just one categorized document. */ + 'model.rare_category_count'?: string + /** The number of categories that match just one categorized document. + * @alias 'model.rare_category_count' */ + mrcc?: string + /** The number of categories that match just one categorized document. 
+ * @alias 'model.rare_category_count' */ + modelRareCategoryCount?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. */ + 'model.dead_category_count'?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. + * @alias 'model.dead_category_count' */ + mdcc?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. + * @alias 'model.dead_category_count' */ + modelDeadCategoryCount?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. */ + 'model.failed_category_count'?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. + * @alias 'model.failed_category_count' */ + mfcc?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. + * @alias 'model.failed_category_count' */ + modelFailedCategoryCount?: string + /** The timestamp when the model stats were gathered, according to server time. */ + 'model.log_time'?: string + /** The timestamp when the model stats were gathered, according to server time. + * @alias 'model.log_time' */ + mlt?: string + /** The timestamp when the model stats were gathered, according to server time. + * @alias 'model.log_time' */ + modelLogTime?: string + /** The timestamp of the last record when the model stats were gathered. */ + 'model.timestamp'?: string + /** The timestamp of the last record when the model stats were gathered. + * @alias 'model.timestamp' */ + mt?: string + /** The timestamp of the last record when the model stats were gathered. + * @alias 'model.timestamp' */ + modelTimestamp?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. */ + 'forecasts.total'?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. + * @alias 'forecasts.total' */ + ft?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. + * @alias 'forecasts.total' */ + forecastsTotal?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. 
*/ + 'forecasts.memory.min'?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.min' */ + fmmin?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.min' */ + forecastsMemoryMin?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. */ + 'forecasts.memory.max'?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.max' */ + fmmax?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.max' */ + forecastsMemoryMax?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. */ + 'forecasts.memory.avg'?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.avg' */ + fmavg?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.avg' */ + forecastsMemoryAvg?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. */ + 'forecasts.memory.total'?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.total' */ + fmt?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.total' */ + forecastsMemoryTotal?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ + 'forecasts.records.min'?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.min' */ + frmin?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.min' */ + forecastsRecordsMin?: string + /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ + 'forecasts.records.max'?: string + /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.max' */ + frmax?: string + /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.max' */ + forecastsRecordsMax?: string + /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ + 'forecasts.records.avg'?: string + /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.avg' */ + fravg?: string + /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.avg' */ + forecastsRecordsAvg?: string + /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ + 'forecasts.records.total'?: string + /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. 
+ * @alias 'forecasts.records.total' */ + frt?: string + /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.total' */ + forecastsRecordsTotal?: string + /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. */ + 'forecasts.time.min'?: string + /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.min' */ + ftmin?: string + /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.min' */ + forecastsTimeMin?: string + /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. */ + 'forecasts.time.max'?: string + /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.max' */ + ftmax?: string + /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.max' */ + forecastsTimeMax?: string + /** The average runtime in milliseconds for forecasts related to the anomaly detection job. */ + 'forecasts.time.avg'?: string + /** The average runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.avg' */ + ftavg?: string + /** The average runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.avg' */ + forecastsTimeAvg?: string + /** The total runtime in milliseconds for forecasts related to the anomaly detection job. */ + 'forecasts.time.total'?: string + /** The total runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.total' */ + ftt?: string + /** The total runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.total' */ + forecastsTimeTotal?: string + /** The unique identifier of the assigned node. */ + 'node.id'?: NodeId + /** The unique identifier of the assigned node. + * @alias 'node.id' */ + ni?: NodeId + /** The unique identifier of the assigned node. + * @alias 'node.id' */ + nodeId?: NodeId + /** The name of the assigned node. */ + 'node.name'?: string + /** The name of the assigned node. + * @alias 'node.name' */ + nn?: string + /** The name of the assigned node. + * @alias 'node.name' */ + nodeName?: string + /** The ephemeral identifier of the assigned node. */ + 'node.ephemeral_id'?: NodeId + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ + ne?: NodeId + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ + nodeEphemeralId?: NodeId + /** The network address of the assigned node. */ + 'node.address'?: string + /** The network address of the assigned node. + * @alias 'node.address' */ + na?: string + /** The network address of the assigned node. + * @alias 'node.address' */ + nodeAddress?: string + /** The number of bucket results produced by the job. */ + 'buckets.count'?: string + /** The number of bucket results produced by the job. + * @alias 'buckets.count' */ + bc?: string + /** The number of bucket results produced by the job. + * @alias 'buckets.count' */ + bucketsCount?: string + /** The sum of all bucket processing times, in milliseconds. */ + 'buckets.time.total'?: string + /** The sum of all bucket processing times, in milliseconds.
+ * @alias 'buckets.time.total' */ + btt?: string + /** The sum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.total' */ + bucketsTimeTotal?: string + /** The minimum of all bucket processing times, in milliseconds. */ + 'buckets.time.min'?: string + /** The minimum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.min' */ + btmin?: string + /** The minimum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.min' */ + bucketsTimeMin?: string + /** The maximum of all bucket processing times, in milliseconds. */ + 'buckets.time.max'?: string + /** The maximum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.max' */ + btmax?: string + /** The maximum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.max' */ + bucketsTimeMax?: string + /** The exponential moving average of all bucket processing times, in milliseconds. */ + 'buckets.time.exp_avg'?: string + /** The exponential moving average of all bucket processing times, in milliseconds. + * @alias 'buckets.time.exp_avg' */ + btea?: string + /** The exponential moving average of all bucket processing times, in milliseconds. + * @alias 'buckets.time.exp_avg' */ + bucketsTimeExpAvg?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. */ + 'buckets.time.exp_avg_hour'?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. + * @alias 'buckets.time.exp_avg_hour' */ + bteah?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. + * @alias 'buckets.time.exp_avg_hour' */ + bucketsTimeExpAvgHour?: string +} + +export interface CatMlJobsRequest extends CatCatRequestBase { + /** Identifier for the anomaly detection job. */ + job_id?: Id + /** Specifies what to do when the request: + * + * * Contains wildcard expressions and there are no jobs that match. + * * Contains the `_all` string or no identifiers and there are no matches. + * * Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there + * are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial + * matches. */ + allow_no_match?: boolean + /** Comma-separated list of column names to display. */ + h?: CatCatAnomalyDetectorColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ + s?: CatCatAnomalyDetectorColumns + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, h?: never, s?: never } +} + +export type CatMlJobsResponse = CatMlJobsJobsRecord[]
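+
+// Usage sketch (illustrative only, not part of the generated definitions): reading
+// anomaly detection job stats through the request/response types above. Assumes an
+// elasticsearch-js `Client` instance named `client` configured elsewhere. The
+// response keys follow the column spellings requested in `h`, which is why the
+// record type declares every spelling (`'data.processed_records'`, `dpr`,
+// `dataProcessedRecords`) as an optional property.
+//
+//   const jobs: CatMlJobsResponse = await client.cat.mlJobs({
+//     h: ['data.processed_records', 'model.bytes'], // columns to return
+//     allow_no_match: true // empty array instead of a 404 when nothing matches
+//   })
+//   for (const job of jobs) console.log(job['data.processed_records'], job['model.bytes'])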
+ +export interface CatMlTrainedModelsRequest extends CatCatRequestBase { + /** A unique identifier for the trained model. */ + model_id?: Id + /** Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + * If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. + * If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ + allow_no_match?: boolean + /** A comma-separated list of column names to display. */ + h?: CatCatTrainedModelsColumns + /** A comma-separated list of column names or aliases used to sort the response. */ + s?: CatCatTrainedModelsColumns + /** Skips the specified number of trained models. */ + from?: integer + /** The maximum number of trained models to display. */ + size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, h?: never, s?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, h?: never, s?: never, from?: never, size?: never } +} + +export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[]
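+
+// Usage sketch (illustrative, not generated): paging through trained models with
+// `from`/`size`, assuming the same configured `client` instance as above.
+//
+//   const models: CatMlTrainedModelsResponse = await client.cat.mlTrainedModels({
+//     from: 0,  // number of models to skip
+//     size: 50, // upper bound on models returned
+//     allow_no_match: true
+//   })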
+ +export interface CatMlTrainedModelsTrainedModelsRecord { + /** The model identifier. */ + id?: Id + /** Information about the creator of the model. */ + created_by?: string + /** Information about the creator of the model. + * @alias created_by */ + c?: string + /** Information about the creator of the model. + * @alias created_by */ + createdBy?: string + /** The estimated heap size to keep the model in memory. */ + heap_size?: ByteSize + /** The estimated heap size to keep the model in memory. + * @alias heap_size */ + hs?: ByteSize + /** The estimated heap size to keep the model in memory. + * @alias heap_size */ + modelHeapSize?: ByteSize + /** The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. */ + operations?: string + /** The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. + * @alias operations */ + o?: string + /** The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. + * @alias operations */ + modelOperations?: string + /** The license level of the model. */ + license?: string + /** The license level of the model. + * @alias license */ + l?: string + /** The time the model was created. */ + create_time?: DateTime + /** The time the model was created. + * @alias create_time */ + ct?: DateTime + /** The version of Elasticsearch when the model was created. */ + version?: VersionString + /** The version of Elasticsearch when the model was created. + * @alias version */ + v?: VersionString + /** A description of the model. */ + description?: string + /** A description of the model. + * @alias description */ + d?: string + /** The number of pipelines that are referencing the model. */ + 'ingest.pipelines'?: string + /** The number of pipelines that are referencing the model. + * @alias 'ingest.pipelines' */ + ip?: string + /** The number of pipelines that are referencing the model. + * @alias 'ingest.pipelines' */ + ingestPipelines?: string + /** The total number of documents that are processed by the model. */ + 'ingest.count'?: string + /** The total number of documents that are processed by the model. + * @alias 'ingest.count' */ + ic?: string + /** The total number of documents that are processed by the model. + * @alias 'ingest.count' */ + ingestCount?: string + /** The total time spent processing documents with this model. */ + 'ingest.time'?: string + /** The total time spent processing documents with this model. + * @alias 'ingest.time' */ + it?: string + /** The total time spent processing documents with this model. + * @alias 'ingest.time' */ + ingestTime?: string + /** The total number of documents that are currently being handled by the model. */ + 'ingest.current'?: string + /** The total number of documents that are currently being handled by the model. + * @alias 'ingest.current' */ + icurr?: string + /** The total number of documents that are currently being handled by the model. + * @alias 'ingest.current' */ + ingestCurrent?: string + /** The total number of failed ingest attempts with the model. */ + 'ingest.failed'?: string + /** The total number of failed ingest attempts with the model. + * @alias 'ingest.failed' */ + if?: string + /** The total number of failed ingest attempts with the model. + * @alias 'ingest.failed' */ + ingestFailed?: string + /** The identifier for the data frame analytics job that created the model. + * Only displayed if the job is still available. */ + 'data_frame.id'?: string + /** The identifier for the data frame analytics job that created the model. + * Only displayed if the job is still available. + * @alias 'data_frame.id' */ + dfid?: string + /** The identifier for the data frame analytics job that created the model. + * Only displayed if the job is still available. + * @alias 'data_frame.id' */ + dataFrameAnalytics?: string + /** The time the data frame analytics job was created. */ + 'data_frame.create_time'?: string + /** The time the data frame analytics job was created. + * @alias 'data_frame.create_time' */ + dft?: string + /** The time the data frame analytics job was created. + * @alias 'data_frame.create_time' */ + dataFrameAnalyticsTime?: string + /** The source index used to train in the data frame analysis. */ + 'data_frame.source_index'?: string + /** The source index used to train in the data frame analysis. + * @alias 'data_frame.source_index' */ + dfsi?: string + /** The source index used to train in the data frame analysis. + * @alias 'data_frame.source_index' */ + dataFrameAnalyticsSrcIndex?: string + /** The analysis used by the data frame to build the model. */ + 'data_frame.analysis'?: string + /** The analysis used by the data frame to build the model. + * @alias 'data_frame.analysis' */ + dfa?: string + /** The analysis used by the data frame to build the model. + * @alias 'data_frame.analysis' */ + dataFrameAnalyticsAnalysis?: string + type?: string +} + +export interface CatNodeattrsNodeAttributesRecord { + /** The node name. */ + node?: string + /** The unique node identifier. */ + id?: string + /** The process identifier. */ + pid?: string + /** The host name. */ + host?: string + /** The host name. + * @alias host */ + h?: string + /** The IP address. */ + ip?: string + /** The IP address. + * @alias ip */ + i?: string + /** The bound transport port. */ + port?: string + /** The attribute name. */ + attr?: string + /** The attribute value. */ + value?: string +}
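+
+// Usage sketch (illustrative, not generated): the nodeattrs record above yields one
+// row per node attribute, so a node with three custom attributes produces three
+// records. Assumes a configured `client` instance.
+//
+//   const attrs: CatNodeattrsResponse = await client.cat.nodeattrs({ h: ['node', 'attr', 'value'] })
+//   for (const { node, attr, value } of attrs) console.log(`${node}: ${attr}=${value}`)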
+ +export interface CatNodeattrsRequest extends CatCatRequestBase { + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatNodeattrsColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false`, the list of selected nodes is computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ + local?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } +} + +export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] + +export interface CatNodesNodesRecord { + /** The unique node identifier. */ + id?: Id + /** The unique node identifier. + * @alias id */ + nodeId?: Id + /** The process identifier. */ + pid?: string + /** The process identifier. + * @alias pid */ + p?: string + /** The IP address. */ + ip?: string + /** The IP address. + * @alias ip */ + i?: string + /** The bound transport port. */ + port?: string + /** The bound transport port. + * @alias port */ + po?: string + /** The bound HTTP address. */ + http_address?: string + /** The bound HTTP address. + * @alias http_address */ + http?: string + /** The Elasticsearch version. */ + version?: VersionString + /** The Elasticsearch version. + * @alias version */ + v?: VersionString + /** The Elasticsearch distribution flavor. */ + flavor?: string + /** The Elasticsearch distribution flavor. + * @alias flavor */ + f?: string + /** The Elasticsearch distribution type. */ + type?: string + /** The Elasticsearch distribution type. + * @alias type */ + t?: string + /** The Elasticsearch build hash. */ + build?: string + /** The Elasticsearch build hash. + * @alias build */ + b?: string + /** The Java version. */ + jdk?: string + /** The Java version. + * @alias jdk */ + j?: string + /** The total disk space. */ + 'disk.total'?: ByteSize + /** The total disk space. + * @alias 'disk.total' */ + dt?: ByteSize + /** The total disk space. + * @alias 'disk.total' */ + diskTotal?: ByteSize + /** The used disk space. */ + 'disk.used'?: ByteSize + /** The used disk space. + * @alias 'disk.used' */ + du?: ByteSize + /** The used disk space. + * @alias 'disk.used' */ + diskUsed?: ByteSize + /** The available disk space. */ + 'disk.avail'?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ + d?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ + da?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ + disk?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ + diskAvail?: ByteSize + /** The used disk space percentage. */ + 'disk.used_percent'?: Percentage + /** The used disk space percentage. + * @alias 'disk.used_percent' */ + dup?: Percentage + /** The used disk space percentage. + * @alias 'disk.used_percent' */ + diskUsedPercent?: Percentage + /** The used heap. */ + 'heap.current'?: string + /** The used heap. + * @alias 'heap.current' */ + hc?: string + /** The used heap. + * @alias 'heap.current' */ + heapCurrent?: string + /** The used heap ratio. */ + 'heap.percent'?: Percentage + /** The used heap ratio.
+ * @alias 'heap.percent' */ + hp?: Percentage + /** The used heap ratio. + * @alias 'heap.percent' */ + heapPercent?: Percentage + /** The maximum configured heap. */ + 'heap.max'?: string + /** The maximum configured heap. + * @alias 'heap.max' */ + hm?: string + /** The maximum configured heap. + * @alias 'heap.max' */ + heapMax?: string + /** The used machine memory. */ + 'ram.current'?: string + /** The used machine memory. + * @alias 'ram.current' */ + rc?: string + /** The used machine memory. + * @alias 'ram.current' */ + ramCurrent?: string + /** The used machine memory ratio. */ + 'ram.percent'?: Percentage + /** The used machine memory ratio. + * @alias 'ram.percent' */ + rp?: Percentage + /** The used machine memory ratio. + * @alias 'ram.percent' */ + ramPercent?: Percentage + /** The total machine memory. */ + 'ram.max'?: string + /** The total machine memory. + * @alias 'ram.max' */ + rn?: string + /** The total machine memory. + * @alias 'ram.max' */ + ramMax?: string + /** The used file descriptors. */ + 'file_desc.current'?: string + /** The used file descriptors. + * @alias 'file_desc.current' */ + fdc?: string + /** The used file descriptors. + * @alias 'file_desc.current' */ + fileDescriptorCurrent?: string + /** The used file descriptor ratio. */ + 'file_desc.percent'?: Percentage + /** The used file descriptor ratio. + * @alias 'file_desc.percent' */ + fdp?: Percentage + /** The used file descriptor ratio. + * @alias 'file_desc.percent' */ + fileDescriptorPercent?: Percentage + /** The maximum number of file descriptors. */ + 'file_desc.max'?: string + /** The maximum number of file descriptors. + * @alias 'file_desc.max' */ + fdm?: string + /** The maximum number of file descriptors. + * @alias 'file_desc.max' */ + fileDescriptorMax?: string + /** The recent system CPU usage as a percentage. */ + cpu?: string + /** The load average for the most recent minute. */ + load_1m?: string + /** The load average for the last five minutes. */ + load_5m?: string + /** The load average for the last fifteen minutes. */ + load_15m?: string + /** The load average for the last fifteen minutes. + * @alias load_15m */ + l?: string + /** The number of available processors (logical CPU cores available to the JVM). */ + available_processors?: string + /** The number of available processors (logical CPU cores available to the JVM). + * @alias available_processors */ + ap?: string + /** The node uptime. */ + uptime?: string + /** The node uptime. + * @alias uptime */ + u?: string + /** The roles of the node. + * Returned values include `c` (cold node), `d` (data node), `f` (frozen node), `h` (hot node), `i` (ingest node), `l` (machine learning node), `m` (master eligible node), `r` (remote cluster client node), `s` (content node), `t` (transform node), `v` (voting-only node), `w` (warm node), and `-` (coordinating node only). */ + 'node.role'?: string + /** The roles of the node. + * Returned values include `c` (cold node), `d` (data node), `f` (frozen node), `h` (hot node), `i` (ingest node), `l` (machine learning node), `m` (master eligible node), `r` (remote cluster client node), `s` (content node), `t` (transform node), `v` (voting-only node), `w` (warm node), and `-` (coordinating node only). + * @alias 'node.role' */ + r?: string + /** The roles of the node.
+ * Returned values include `c` (cold node), `d` (data node), `f` (frozen node), `h` (hot node), `i` (ingest node), `l` (machine learning node), `m` (master eligible node), `r` (remote cluster client node), `s` (content node), `t` (transform node), `v` (voting-only node), `w` (warm node), and `-` (coordinating node only). + * @alias 'node.role' */ + role?: string + /** The roles of the node. + * Returned values include `c` (cold node), `d` (data node), `f` (frozen node), `h` (hot node), `i` (ingest node), `l` (machine learning node), `m` (master eligible node), `r` (remote cluster client node), `s` (content node), `t` (transform node), `v` (voting-only node), `w` (warm node), and `-` (coordinating node only). + * @alias 'node.role' */ + nodeRole?: string + /** Indicates whether the node is the elected master node. + * Returned values include `*` (elected master) and `-` (not elected master). */ + master?: string + /** Indicates whether the node is the elected master node. + * Returned values include `*` (elected master) and `-` (not elected master). + * @alias master */ + m?: string + /** The node name. */ + name?: Name + /** The node name. + * @alias name */ + n?: Name + /** The size of completion. */ + 'completion.size'?: string + /** The size of completion. + * @alias 'completion.size' */ + cs?: string + /** The size of completion. + * @alias 'completion.size' */ + completionSize?: string + /** The used fielddata cache. */ + 'fielddata.memory_size'?: string + /** The used fielddata cache. + * @alias 'fielddata.memory_size' */ + fm?: string + /** The used fielddata cache. + * @alias 'fielddata.memory_size' */ + fielddataMemory?: string + /** The fielddata evictions. */ + 'fielddata.evictions'?: string + /** The fielddata evictions. + * @alias 'fielddata.evictions' */ + fe?: string + /** The fielddata evictions. + * @alias 'fielddata.evictions' */ + fielddataEvictions?: string + /** The used query cache. */ + 'query_cache.memory_size'?: string + /** The used query cache. + * @alias 'query_cache.memory_size' */ + qcm?: string + /** The used query cache. + * @alias 'query_cache.memory_size' */ + queryCacheMemory?: string + /** The query cache evictions. */ + 'query_cache.evictions'?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ + qce?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ + queryCacheEvictions?: string + /** The query cache hit counts. */ + 'query_cache.hit_count'?: string + /** The query cache hit counts. + * @alias 'query_cache.hit_count' */ + qchc?: string + /** The query cache hit counts. + * @alias 'query_cache.hit_count' */ + queryCacheHitCount?: string + /** The query cache miss counts. */ + 'query_cache.miss_count'?: string + /** The query cache miss counts. + * @alias 'query_cache.miss_count' */ + qcmc?: string + /** The query cache miss counts. + * @alias 'query_cache.miss_count' */ + queryCacheMissCount?: string + /** The used request cache. */ + 'request_cache.memory_size'?: string + /** The used request cache. + * @alias 'request_cache.memory_size' */ + rcm?: string + /** The used request cache. + * @alias 'request_cache.memory_size' */ + requestCacheMemory?: string + /** The request cache evictions. */ + 'request_cache.evictions'?: string + /** The request cache evictions. + * @alias 'request_cache.evictions' */ + rce?: string + /** The request cache evictions. + * @alias 'request_cache.evictions' */ + requestCacheEvictions?: string + /** The request cache hit counts.
*/ + 'request_cache.hit_count'?: string + /** The request cache hit counts. + * @alias 'request_cache.hit_count' */ + rchc?: string + /** The request cache hit counts. + * @alias 'request_cache.hit_count' */ + requestCacheHitCount?: string + /** The request cache miss counts. */ + 'request_cache.miss_count'?: string + /** The request cache miss counts. + * @alias 'request_cache.miss_count' */ + rcmc?: string + /** The request cache miss counts. + * @alias 'request_cache.miss_count' */ + requestCacheMissCount?: string + /** The number of flushes. */ + 'flush.total'?: string + /** The number of flushes. + * @alias 'flush.total' */ + ft?: string + /** The number of flushes. + * @alias 'flush.total' */ + flushTotal?: string + /** The time spent in flush. */ + 'flush.total_time'?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ + ftt?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ + flushTotalTime?: string + /** The number of current get ops. */ + 'get.current'?: string + /** The number of current get ops. + * @alias 'get.current' */ + gc?: string + /** The number of current get ops. + * @alias 'get.current' */ + getCurrent?: string + /** The time spent in get. */ + 'get.time'?: string + /** The time spent in get. + * @alias 'get.time' */ + gti?: string + /** The time spent in get. + * @alias 'get.time' */ + getTime?: string + /** The number of get ops. */ + 'get.total'?: string + /** The number of get ops. + * @alias 'get.total' */ + gto?: string + /** The number of get ops. + * @alias 'get.total' */ + getTotal?: string + /** The time spent in successful gets. */ + 'get.exists_time'?: string + /** The time spent in successful gets. + * @alias 'get.exists_time' */ + geti?: string + /** The time spent in successful gets. + * @alias 'get.exists_time' */ + getExistsTime?: string + /** The number of successful get operations. */ + 'get.exists_total'?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ + geto?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ + getExistsTotal?: string + /** The time spent in failed gets. */ + 'get.missing_time'?: string + /** The time spent in failed gets. + * @alias 'get.missing_time' */ + gmti?: string + /** The time spent in failed gets. + * @alias 'get.missing_time' */ + getMissingTime?: string + /** The number of failed gets. */ + 'get.missing_total'?: string + /** The number of failed gets. + * @alias 'get.missing_total' */ + gmto?: string + /** The number of failed gets. + * @alias 'get.missing_total' */ + getMissingTotal?: string + /** The number of current deletions. */ + 'indexing.delete_current'?: string + /** The number of current deletions. + * @alias 'indexing.delete_current' */ + idc?: string + /** The number of current deletions. + * @alias 'indexing.delete_current' */ + indexingDeleteCurrent?: string + /** The time spent in deletions. */ + 'indexing.delete_time'?: string + /** The time spent in deletions. + * @alias 'indexing.delete_time' */ + idti?: string + /** The time spent in deletions. + * @alias 'indexing.delete_time' */ + indexingDeleteTime?: string + /** The number of delete operations. */ + 'indexing.delete_total'?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ + idto?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ + indexingDeleteTotal?: string + /** The number of current indexing operations. 
*/ + 'indexing.index_current'?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ + iic?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ + indexingIndexCurrent?: string + /** The time spent in indexing. */ + 'indexing.index_time'?: string + /** The time spent in indexing. + * @alias 'indexing.index_time' */ + iiti?: string + /** The time spent in indexing. + * @alias 'indexing.index_time' */ + indexingIndexTime?: string + /** The number of indexing operations. */ + 'indexing.index_total'?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ + iito?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ + indexingIndexTotal?: string + /** The number of failed indexing operations. */ + 'indexing.index_failed'?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ + iif?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ + indexingIndexFailed?: string + /** The number of current merges. */ + 'merges.current'?: string + /** The number of current merges. + * @alias 'merges.current' */ + mc?: string + /** The number of current merges. + * @alias 'merges.current' */ + mergesCurrent?: string + /** The number of current merging docs. */ + 'merges.current_docs'?: string + /** The number of current merging docs. + * @alias 'merges.current_docs' */ + mcd?: string + /** The number of current merging docs. + * @alias 'merges.current_docs' */ + mergesCurrentDocs?: string + /** The size of current merges. */ + 'merges.current_size'?: string + /** The size of current merges. + * @alias 'merges.current_size' */ + mcs?: string + /** The size of current merges. + * @alias 'merges.current_size' */ + mergesCurrentSize?: string + /** The number of completed merge operations. */ + 'merges.total'?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ + mt?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ + mergesTotal?: string + /** The docs merged. */ + 'merges.total_docs'?: string + /** The docs merged. + * @alias 'merges.total_docs' */ + mtd?: string + /** The docs merged. + * @alias 'merges.total_docs' */ + mergesTotalDocs?: string + /** The size merged. */ + 'merges.total_size'?: string + /** The size merged. + * @alias 'merges.total_size' */ + mts?: string + /** The size merged. + * @alias 'merges.total_size' */ + mergesTotalSize?: string + /** The time spent in merges. */ + 'merges.total_time'?: string + /** The time spent in merges. + * @alias 'merges.total_time' */ + mtt?: string + /** The time spent in merges. + * @alias 'merges.total_time' */ + mergesTotalTime?: string + /** The total refreshes. */ + 'refresh.total'?: string + /** The time spent in refreshes. */ + 'refresh.time'?: string + /** The total external refreshes. */ + 'refresh.external_total'?: string + /** The total external refreshes. + * @alias 'refresh.external_total' */ + rto?: string + /** The total external refreshes. + * @alias 'refresh.external_total' */ + refreshTotal?: string + /** The time spent in external refreshes. */ + 'refresh.external_time'?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ + rti?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ + refreshTime?: string + /** The number of pending refresh listeners. 
*/ + 'refresh.listeners'?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ + rli?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ + refreshListeners?: string + /** The total script compilations. */ + 'script.compilations'?: string + /** The total script compilations. + * @alias 'script.compilations' */ + scrcc?: string + /** The total script compilations. + * @alias 'script.compilations' */ + scriptCompilations?: string + /** The total compiled scripts evicted from the cache. */ + 'script.cache_evictions'?: string + /** The total compiled scripts evicted from the cache. + * @alias 'script.cache_evictions' */ + scrce?: string + /** The total compiled scripts evicted from the cache. + * @alias 'script.cache_evictions' */ + scriptCacheEvictions?: string + /** The script cache compilation limit triggered. */ + 'script.compilation_limit_triggered'?: string + /** The script cache compilation limit triggered. + * @alias 'script.compilation_limit_triggered' */ + scrclt?: string + /** The script cache compilation limit triggered. + * @alias 'script.compilation_limit_triggered' */ + scriptCacheCompilationLimitTriggered?: string + /** The current fetch phase operations. */ + 'search.fetch_current'?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ + sfc?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ + searchFetchCurrent?: string + /** The time spent in fetch phase. */ + 'search.fetch_time'?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ + sfti?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ + searchFetchTime?: string + /** The total fetch operations. */ + 'search.fetch_total'?: string + /** The total fetch operations. + * @alias 'search.fetch_total' */ + sfto?: string + /** The total fetch operations. + * @alias 'search.fetch_total' */ + searchFetchTotal?: string + /** The open search contexts. */ + 'search.open_contexts'?: string + /** The open search contexts. + * @alias 'search.open_contexts' */ + so?: string + /** The open search contexts. + * @alias 'search.open_contexts' */ + searchOpenContexts?: string + /** The current query phase operations. */ + 'search.query_current'?: string + /** The current query phase operations. + * @alias 'search.query_current' */ + sqc?: string + /** The current query phase operations. + * @alias 'search.query_current' */ + searchQueryCurrent?: string + /** The time spent in query phase. */ + 'search.query_time'?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ + sqti?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ + searchQueryTime?: string + /** The total query phase operations. */ + 'search.query_total'?: string + /** The total query phase operations. + * @alias 'search.query_total' */ + sqto?: string + /** The total query phase operations. + * @alias 'search.query_total' */ + searchQueryTotal?: string + /** The open scroll contexts. */ + 'search.scroll_current'?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ + scc?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ + searchScrollCurrent?: string + /** The time scroll contexts held open. */ + 'search.scroll_time'?: string + /** The time scroll contexts held open. + * @alias 'search.scroll_time' */ + scti?: string + /** The time scroll contexts held open. 
+ * @alias 'search.scroll_time' */ + searchScrollTime?: string + /** The completed scroll contexts. */ + 'search.scroll_total'?: string + /** The completed scroll contexts. + * @alias 'search.scroll_total' */ + scto?: string + /** The completed scroll contexts. + * @alias 'search.scroll_total' */ + searchScrollTotal?: string + /** The number of segments. */ + 'segments.count'?: string + /** The number of segments. + * @alias 'segments.count' */ + sc?: string + /** The number of segments. + * @alias 'segments.count' */ + segmentsCount?: string + /** The memory used by segments. */ + 'segments.memory'?: string + /** The memory used by segments. + * @alias 'segments.memory' */ + sm?: string + /** The memory used by segments. + * @alias 'segments.memory' */ + segmentsMemory?: string + /** The memory used by the index writer. */ + 'segments.index_writer_memory'?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ + siwm?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ + segmentsIndexWriterMemory?: string + /** The memory used by the version map. */ + 'segments.version_map_memory'?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ + svmm?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ + segmentsVersionMapMemory?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred to in _parent fields. */ + 'segments.fixed_bitset_memory'?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred to in _parent fields. + * @alias 'segments.fixed_bitset_memory' */ + sfbm?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred to in _parent fields. + * @alias 'segments.fixed_bitset_memory' */ + fixedBitsetMemory?: string + /** The number of current suggest operations. */ + 'suggest.current'?: string + /** The number of current suggest operations. + * @alias 'suggest.current' */ + suc?: string + /** The number of current suggest operations. + * @alias 'suggest.current' */ + suggestCurrent?: string + /** The time spent in suggest. */ + 'suggest.time'?: string + /** The time spent in suggest. + * @alias 'suggest.time' */ + suti?: string + /** The time spent in suggest. + * @alias 'suggest.time' */ + suggestTime?: string + /** The number of suggest operations. */ + 'suggest.total'?: string + /** The number of suggest operations. + * @alias 'suggest.total' */ + suto?: string + /** The number of suggest operations. + * @alias 'suggest.total' */ + suggestTotal?: string + /** The number of bulk shard operations. */ + 'bulk.total_operations'?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ + bto?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ + bulkTotalOperations?: string + /** The time spent in shard bulk. */ + 'bulk.total_time'?: string + /** The time spent in shard bulk. + * @alias 'bulk.total_time' */ + btti?: string + /** The time spent in shard bulk. + * @alias 'bulk.total_time' */ + bulkTotalTime?: string + /** The total size in bytes of shard bulk. */ + 'bulk.total_size_in_bytes'?: string + /** The total size in bytes of shard bulk. + * @alias 'bulk.total_size_in_bytes' */ + btsi?: string + /** The total size in bytes of shard bulk.
+ * @alias 'bulk.total_size_in_bytes' */ + bulkTotalSizeInBytes?: string + /** The average time spent in shard bulk. */ + 'bulk.avg_time'?: string + /** The average time spent in shard bulk. + * @alias 'bulk.avg_time' */ + bati?: string + /** The average time spent in shard bulk. + * @alias 'bulk.avg_time' */ + bulkAvgTime?: string + /** The average size in bytes of shard bulk. */ + 'bulk.avg_size_in_bytes'?: string + /** The average size in bytes of shard bulk. + * @alias 'bulk.avg_size_in_bytes' */ + basi?: string + /** The average size in bytes of shard bulk. + * @alias 'bulk.avg_size_in_bytes' */ + bulkAvgSizeInBytes?: string +} + +export interface CatNodesRequest extends CatCatRequestBase { + /** If `true`, return the full node ID. If `false`, return the shortened node ID. */ + full_id?: boolean + /** If `true`, the response includes information from segments that are not loaded into memory. */ + include_unloaded_segments?: boolean + /** A comma-separated list of column names to display. + * It supports simple wildcards. */ + h?: CatCatNodeColumns + /** A comma-separated list of column names or aliases that determines the sort order. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** The period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { full_id?: never, include_unloaded_segments?: never, h?: never, s?: never, master_timeout?: never } +} + +export type CatNodesResponse = CatNodesNodesRecord[]
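+
+// Usage sketch (illustrative, not generated): listing nodes sorted by name. The `s`
+// parameter takes an optional `:asc`/`:desc` suffix per column. Assumes a configured
+// `client` instance.
+//
+//   const nodes: CatNodesResponse = await client.cat.nodes({
+//     full_id: true, // complete node IDs instead of shortened ones
+//     h: ['name', 'heap.percent', 'node.role', 'master'],
+//     s: ['name:asc']
+//   })
+//   const elected = nodes.find(n => n.master === '*') // `*` marks the elected master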
+ +export interface CatPendingTasksPendingTasksRecord { + /** The task insertion order. */ + insertOrder?: string + /** The task insertion order. + * @alias insertOrder */ + o?: string + /** Indicates how long the task has been in the queue. */ + timeInQueue?: string + /** Indicates how long the task has been in the queue. + * @alias timeInQueue */ + t?: string + /** The task priority. */ + priority?: string + /** The task priority. + * @alias priority */ + p?: string + /** The task source. */ + source?: string + /** The task source. + * @alias source */ + s?: string +} + +export interface CatPendingTasksRequest extends CatCatRequestBase { + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatPendingTasksColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ + local?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } +} + +export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] + +export interface CatPluginsPluginsRecord { + /** The unique node identifier. */ + id?: NodeId + /** The node name. */ + name?: Name + /** The node name. + * @alias name */ + n?: Name + /** The component name. */ + component?: string + /** The component name. + * @alias component */ + c?: string + /** The component version. */ + version?: VersionString + /** The component version. + * @alias version */ + v?: VersionString + /** The plugin details. */ + description?: string + /** The plugin details. + * @alias description */ + d?: string + /** The plugin type. */ + type?: string + /** The plugin type. + * @alias type */ + t?: string +} + +export interface CatPluginsRequest extends CatCatRequestBase { + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatPluginsColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** Include bootstrap plugins in the response. */ + include_bootstrap?: boolean + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ + local?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { h?: never, s?: never, include_bootstrap?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { h?: never, s?: never, include_bootstrap?: never, local?: never, master_timeout?: never } +} + +export type CatPluginsResponse = CatPluginsPluginsRecord[]
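A similar sketch for the two APIs above, reusing the `client` from the previous example; the sort column is an assumption based on the documented cat column aliases:

```ts
// Pending cluster tasks, longest-queued first (CatPendingTasksPendingTasksRecord[])
const pending = await client.cat.pendingTasks({
  h: ['insertOrder', 'timeInQueue', 'priority', 'source'],
  s: 'timeInQueue:desc',
})
for (const task of pending) console.log(task.insertOrder, task.timeInQueue, task.source)

// Installed plugins per node (CatPluginsPluginsRecord[])
const plugins = await client.cat.plugins({ h: ['name', 'component', 'version'] })
for (const p of plugins) console.log(p.name, p.component, p.version)
```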
+ +export interface CatRecoveryRecoveryRecord { + /** The index name. */ + index?: IndexName + /** The index name. + * @alias index */ + i?: IndexName + /** The index name. + * @alias index */ + idx?: IndexName + /** The shard name. */ + shard?: string + /** The shard name. + * @alias shard */ + s?: string + /** The shard name. + * @alias shard */ + sh?: string + /** The recovery start time. */ + start_time?: DateTime + /** The recovery start time. + * @alias start_time */ + start?: DateTime + /** The recovery start time in epoch milliseconds. */ + start_time_millis?: EpochTime<UnitMillis> + /** The recovery start time in epoch milliseconds. + * @alias start_time_millis */ + start_millis?: EpochTime<UnitMillis> + /** The recovery stop time. */ + stop_time?: DateTime + /** The recovery stop time. + * @alias stop_time */ + stop?: DateTime + /** The recovery stop time in epoch milliseconds. */ + stop_time_millis?: EpochTime<UnitMillis> + /** The recovery stop time in epoch milliseconds. + * @alias stop_time_millis */ + stop_millis?: EpochTime<UnitMillis> + /** The recovery time. */ + time?: Duration + /** The recovery time. + * @alias time */ + t?: Duration + /** The recovery time. + * @alias time */ + ti?: Duration + /** The recovery type. */ + type?: string + /** The recovery type. + * @alias type */ + ty?: string + /** The recovery stage. */ + stage?: string + /** The recovery stage. + * @alias stage */ + st?: string + /** The source host. */ + source_host?: string + /** The source host. + * @alias source_host */ + shost?: string + /** The source node name. */ + source_node?: string + /** The source node name. + * @alias source_node */ + snode?: string + /** The target host. */ + target_host?: string + /** The target host. + * @alias target_host */ + thost?: string + /** The target node name. */ + target_node?: string + /** The target node name. + * @alias target_node */ + tnode?: string + /** The repository name. */ + repository?: string + /** The repository name. + * @alias repository */ + rep?: string + /** The snapshot name. */ + snapshot?: string + /** The snapshot name. + * @alias snapshot */ + snap?: string + /** The number of files to recover. */ + files?: string + /** The number of files to recover. + * @alias files */ + f?: string + /** The files recovered. */ + files_recovered?: string + /** The files recovered. + * @alias files_recovered */ + fr?: string + /** The ratio of files recovered. */ + files_percent?: Percentage + /** The ratio of files recovered. + * @alias files_percent */ + fp?: Percentage + /** The total number of files. */ + files_total?: string + /** The total number of files. + * @alias files_total */ + tf?: string + /** The number of bytes to recover. */ + bytes?: string + /** The number of bytes to recover. + * @alias bytes */ + b?: string + /** The bytes recovered. */ + bytes_recovered?: string + /** The bytes recovered. + * @alias bytes_recovered */ + br?: string + /** The ratio of bytes recovered. */ + bytes_percent?: Percentage + /** The ratio of bytes recovered. + * @alias bytes_percent */ + bp?: Percentage + /** The total number of bytes. */ + bytes_total?: string + /** The total number of bytes. + * @alias bytes_total */ + tb?: string + /** The number of translog operations to recover. */ + translog_ops?: string + /** The number of translog operations to recover. + * @alias translog_ops */ + to?: string + /** The translog operations recovered. */ + translog_ops_recovered?: string + /** The translog operations recovered. + * @alias translog_ops_recovered */ + tor?: string + /** The ratio of translog operations recovered. */ + translog_ops_percent?: Percentage + /** The ratio of translog operations recovered. + * @alias translog_ops_percent */ + top?: Percentage +}
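The record above is what each row of a `cat.recovery` call (its request type follows below) deserializes to; a sketch that reports per-shard recovery progress, reusing the earlier `client` and with the index name as a placeholder:

```ts
// Ongoing recoveries only; as with all cat records, values arrive as strings
const recoveries = await client.cat.recovery({
  index: 'my-index', // placeholder
  active_only: true,
  h: ['index', 'shard', 'stage', 'files_percent', 'bytes_percent'],
})
for (const r of recoveries) {
  console.log(`${r.index}[${r.shard}] ${r.stage}: files ${r.files_percent}, bytes ${r.bytes_percent}`)
}
```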
+ +export interface CatRecoveryRequest extends CatCatRequestBase { + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `true`, the response only includes ongoing shard recoveries. */ + active_only?: boolean + /** If `true`, the response includes detailed information about shard recoveries. */ + detailed?: boolean + /** A comma-separated list of column names to display. + * It supports simple wildcards. */ + h?: CatCatRecoveryColumns + /** A comma-separated list of column names or aliases that determines the sort order. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, h?: never, s?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, h?: never, s?: never } +} + +export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] + +export interface CatRepositoriesRepositoriesRecord { + /** The unique repository identifier. */ + id?: string + /** The unique repository identifier. + * @alias id */ + repoId?: string + /** The repository type. */ + type?: string + /** The repository type. + * @alias type */ + t?: string +} + +export interface CatRepositoriesRequest extends CatCatRequestBase { + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: Names + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ + local?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { h?: never, s?: never, local?: never, master_timeout?: never } +} + +export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[]
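And a one-liner sketch for the repositories API just defined, with the same assumed `client`:

```ts
// Registered snapshot repositories, sorted by id (CatRepositoriesRepositoriesRecord[])
const repos = await client.cat.repositories({ s: 'id' })
repos.forEach(r => console.log(r.id, r.type))
```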
+ +export interface CatSegmentsRequest extends CatCatRequestBase { + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** A comma-separated list of column names to display. + * It supports simple wildcards. */ + h?: CatCatSegmentsColumns + /** A comma-separated list of column names or aliases that determines the sort order. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ + local?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as open,hidden. */ + expand_wildcards?: ExpandWildcards + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only + * missing or closed indices. This behavior applies even if the request targets other open indices. For example, + * a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ + allow_no_indices?: boolean + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ + ignore_throttled?: boolean + /** If true, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + /** If true, allow closed indices to be returned in the response; otherwise, if false, keep the legacy behaviour + * of throwing an exception if the index pattern matches closed indices. */ + allow_closed?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never, local?: never, master_timeout?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_closed?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, h?: never, s?: never, local?: never, master_timeout?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_closed?: never } +} + +export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] + +export interface CatSegmentsSegmentsRecord { + /** The index name. */ + index?: IndexName + /** The index name. + * @alias index */ + i?: IndexName + /** The index name. + * @alias index */ + idx?: IndexName + /** The shard name. */ + shard?: string + /** The shard name. + * @alias shard */ + s?: string + /** The shard name. + * @alias shard */ + sh?: string + /** The shard type: `primary` or `replica`. */ + prirep?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ + p?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ + pr?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ + primaryOrReplica?: string + /** The IP address of the node where it lives. */ + ip?: string + /** The unique identifier of the node where it lives. */ + id?: NodeId + /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. */ + segment?: string + /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. + * @alias segment */ + seg?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written, then uses this number to derive the segment name. */ + generation?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written, then uses this number to derive the segment name. + * @alias generation */ + g?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written, then uses this number to derive the segment name. + * @alias generation */ + gen?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. */ + 'docs.count'?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. + * @alias 'docs.count' */ + dc?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents.
+ * It also excludes documents which were indexed recently and do not yet belong to a segment. + * @alias 'docs.count' */ + docsCount?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. */ + 'docs.deleted'?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. + * @alias 'docs.deleted' */ + dd?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. + * @alias 'docs.deleted' */ + docsDeleted?: string + /** The segment size in bytes. */ + size?: ByteSize + /** The segment size in bytes. + * @alias size */ + si?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. */ + 'size.memory'?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. + * @alias 'size.memory' */ + sm?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. + * @alias 'size.memory' */ + sizeMemory?: ByteSize + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. */ + committed?: string + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. + * @alias committed */ + ic?: string + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. + * @alias committed */ + isCommitted?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. */ + searchable?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. + * @alias searchable */ + is?: string + /** If `true`, the segment is searchable. 
+ * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. + * @alias searchable */ + isSearchable?: string + /** The version of Lucene used to write the segment. */ + version?: VersionString + /** The version of Lucene used to write the segment. + * @alias version */ + v?: VersionString + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. */ + compound?: string + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. + * @alias compound */ + ico?: string + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. + * @alias compound */ + isCompound?: string +} + +export interface CatShardsRequest extends CatCatRequestBase { + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: CatCatShardColumns + /** A comma-separated list of column names or aliases that determines the sort order. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** The period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, h?: never, s?: never, master_timeout?: never } +} + +export type CatShardsResponse = CatShardsShardsRecord[] + +export interface CatShardsShardsRecord { + /** The index name. */ + index?: string + /** The index name. + * @alias index */ + i?: string + /** The index name. + * @alias index */ + idx?: string + /** The shard name. */ + shard?: string + /** The shard name. + * @alias shard */ + s?: string + /** The shard name. + * @alias shard */ + sh?: string + /** The shard type: `primary` or `replica`. */ + prirep?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ + p?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ + pr?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ + primaryOrReplica?: string + /** The shard state. + * Returned values include: + * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * `RELOCATING`: The shard is relocating. + * `STARTED`: The shard has started. + * `UNASSIGNED`: The shard is not assigned to any node. */ + state?: string + /** The shard state. + * Returned values include: + * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * `RELOCATING`: The shard is relocating. + * `STARTED`: The shard has started. + * `UNASSIGNED`: The shard is not assigned to any node. + * @alias state */ + st?: string + /** The number of documents in the shard. */ + docs?: string | null + /** The number of documents in the shard. + * @alias docs */ + d?: string | null + /** The number of documents in the shard. 
+ * @alias docs */ + dc?: string | null + /** The disk space used by the shard. */ + store?: string | null + /** The disk space used by the shard. + * @alias store */ + sto?: string | null + /** The total size of the dataset (including the cache for partially mounted indices). */ + dataset?: string | null + /** The IP address of the node. */ + ip?: string | null + /** The unique identifier for the node. */ + id?: string + /** The name of the node. */ + node?: string | null + /** The name of the node. + * @alias node */ + n?: string | null + /** The sync identifier. */ + sync_id?: string + /** The reason for the last change to the state of an unassigned shard. + * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. + * Returned values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. + * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. + * `INDEX_CLOSED`: Unassigned because the index was closed. + * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. + * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. + * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. + * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * `REINITIALIZED`: When a shard moves from started back to initializing. + * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. */ + 'unassigned.reason'?: string + /** The reason for the last change to the state of an unassigned shard. + * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. + * Returned values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. + * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. + * `INDEX_CLOSED`: Unassigned because the index was closed. + * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. + * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster.
+ * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. + * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * `REINITIALIZED`: When a shard moves from started back to initializing. + * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. + * @alias 'unassigned.reason' */ + ur?: string + /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). */ + 'unassigned.at'?: string + /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). + * @alias 'unassigned.at' */ + ua?: string + /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). */ + 'unassigned.for'?: string + /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). + * @alias 'unassigned.for' */ + uf?: string + /** Additional details as to why the shard became unassigned. + * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. */ + 'unassigned.details'?: string + /** Additional details as to why the shard became unassigned. + * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. + * @alias 'unassigned.details' */ + ud?: string + /** The type of recovery source. */ + 'recoverysource.type'?: string + /** The type of recovery source. + * @alias 'recoverysource.type' */ + rs?: string + /** The size of completion. */ + 'completion.size'?: string + /** The size of completion. + * @alias 'completion.size' */ + cs?: string + /** The size of completion. + * @alias 'completion.size' */ + completionSize?: string + /** The used fielddata cache memory. */ + 'fielddata.memory_size'?: string + /** The used fielddata cache memory. + * @alias 'fielddata.memory_size' */ + fm?: string + /** The used fielddata cache memory. + * @alias 'fielddata.memory_size' */ + fielddataMemory?: string + /** The fielddata cache evictions. */ + 'fielddata.evictions'?: string + /** The fielddata cache evictions. + * @alias 'fielddata.evictions' */ + fe?: string + /** The fielddata cache evictions. + * @alias 'fielddata.evictions' */ + fielddataEvictions?: string + /** The used query cache memory. */ + 'query_cache.memory_size'?: string + /** The used query cache memory. + * @alias 'query_cache.memory_size' */ + qcm?: string + /** The used query cache memory. + * @alias 'query_cache.memory_size' */ + queryCacheMemory?: string + /** The query cache evictions. */ + 'query_cache.evictions'?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ + qce?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ + queryCacheEvictions?: string + /** The number of flushes. */ + 'flush.total'?: string + /** The number of flushes. + * @alias 'flush.total' */ + ft?: string + /** The number of flushes. + * @alias 'flush.total' */ + flushTotal?: string + /** The time spent in flush. */ + 'flush.total_time'?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ + ftt?: string + /** The time spent in flush. 
+ * @alias 'flush.total_time' */ + flushTotalTime?: string + /** The number of current get operations. */ + 'get.current'?: string + /** The number of current get operations. + * @alias 'get.current' */ + gc?: string + /** The number of current get operations. + * @alias 'get.current' */ + getCurrent?: string + /** The time spent in get operations. */ + 'get.time'?: string + /** The time spent in get operations. + * @alias 'get.time' */ + gti?: string + /** The time spent in get operations. + * @alias 'get.time' */ + getTime?: string + /** The number of get operations. */ + 'get.total'?: string + /** The number of get operations. + * @alias 'get.total' */ + gto?: string + /** The number of get operations. + * @alias 'get.total' */ + getTotal?: string + /** The time spent in successful get operations. */ + 'get.exists_time'?: string + /** The time spent in successful get operations. + * @alias 'get.exists_time' */ + geti?: string + /** The time spent in successful get operations. + * @alias 'get.exists_time' */ + getExistsTime?: string + /** The number of successful get operations. */ + 'get.exists_total'?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ + geto?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ + getExistsTotal?: string + /** The time spent in failed get operations. */ + 'get.missing_time'?: string + /** The time spent in failed get operations. + * @alias 'get.missing_time' */ + gmti?: string + /** The time spent in failed get operations. + * @alias 'get.missing_time' */ + getMissingTime?: string + /** The number of failed get operations. */ + 'get.missing_total'?: string + /** The number of failed get operations. + * @alias 'get.missing_total' */ + gmto?: string + /** The number of failed get operations. + * @alias 'get.missing_total' */ + getMissingTotal?: string + /** The number of current deletion operations. */ + 'indexing.delete_current'?: string + /** The number of current deletion operations. + * @alias 'indexing.delete_current' */ + idc?: string + /** The number of current deletion operations. + * @alias 'indexing.delete_current' */ + indexingDeleteCurrent?: string + /** The time spent in deletion operations. */ + 'indexing.delete_time'?: string + /** The time spent in deletion operations. + * @alias 'indexing.delete_time' */ + idti?: string + /** The time spent in deletion operations. + * @alias 'indexing.delete_time' */ + indexingDeleteTime?: string + /** The number of delete operations. */ + 'indexing.delete_total'?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ + idto?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ + indexingDeleteTotal?: string + /** The number of current indexing operations. */ + 'indexing.index_current'?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ + iic?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ + indexingIndexCurrent?: string + /** The time spent in indexing operations. */ + 'indexing.index_time'?: string + /** The time spent in indexing operations. + * @alias 'indexing.index_time' */ + iiti?: string + /** The time spent in indexing operations. + * @alias 'indexing.index_time' */ + indexingIndexTime?: string + /** The number of indexing operations. */ + 'indexing.index_total'?: string + /** The number of indexing operations. 
+ * @alias 'indexing.index_total' */ + iito?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ + indexingIndexTotal?: string + /** The number of failed indexing operations. */ + 'indexing.index_failed'?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ + iif?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ + indexingIndexFailed?: string + /** The number of current merge operations. */ + 'merges.current'?: string + /** The number of current merge operations. + * @alias 'merges.current' */ + mc?: string + /** The number of current merge operations. + * @alias 'merges.current' */ + mergesCurrent?: string + /** The number of current merging documents. */ + 'merges.current_docs'?: string + /** The number of current merging documents. + * @alias 'merges.current_docs' */ + mcd?: string + /** The number of current merging documents. + * @alias 'merges.current_docs' */ + mergesCurrentDocs?: string + /** The size of current merge operations. */ + 'merges.current_size'?: string + /** The size of current merge operations. + * @alias 'merges.current_size' */ + mcs?: string + /** The size of current merge operations. + * @alias 'merges.current_size' */ + mergesCurrentSize?: string + /** The number of completed merge operations. */ + 'merges.total'?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ + mt?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ + mergesTotal?: string + /** The number of merged documents. */ + 'merges.total_docs'?: string + /** The number of merged documents. + * @alias 'merges.total_docs' */ + mtd?: string + /** The number of merged documents. + * @alias 'merges.total_docs' */ + mergesTotalDocs?: string + /** The size of current merges. */ + 'merges.total_size'?: string + /** The size of current merges. + * @alias 'merges.total_size' */ + mts?: string + /** The size of current merges. + * @alias 'merges.total_size' */ + mergesTotalSize?: string + /** The time spent merging documents. */ + 'merges.total_time'?: string + /** The time spent merging documents. + * @alias 'merges.total_time' */ + mtt?: string + /** The time spent merging documents. + * @alias 'merges.total_time' */ + mergesTotalTime?: string + /** The total number of refreshes. */ + 'refresh.total'?: string + /** The time spent in refreshes. */ + 'refresh.time'?: string + /** The total number of external refreshes. */ + 'refresh.external_total'?: string + /** The total number of external refreshes. + * @alias 'refresh.external_total' */ + rto?: string + /** The total number of external refreshes. + * @alias 'refresh.external_total' */ + refreshTotal?: string + /** The time spent in external refreshes. */ + 'refresh.external_time'?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ + rti?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ + refreshTime?: string + /** The number of pending refresh listeners. */ + 'refresh.listeners'?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ + rli?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ + refreshListeners?: string + /** The current fetch phase operations. */ + 'search.fetch_current'?: string + /** The current fetch phase operations.
+ * @alias 'search.fetch_current' */ + searchFetchCurrent?: string + /** The time spent in fetch phase. */ + 'search.fetch_time'?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ + sfti?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ + searchFetchTime?: string + /** The total number of fetch operations. */ + 'search.fetch_total'?: string + /** The total number of fetch operations. + * @alias 'search.fetch_total' */ + sfto?: string + /** The total number of fetch operations. + * @alias 'search.fetch_total' */ + searchFetchTotal?: string + /** The number of open search contexts. */ + 'search.open_contexts'?: string + /** The number of open search contexts. + * @alias 'search.open_contexts' */ + so?: string + /** The number of open search contexts. + * @alias 'search.open_contexts' */ + searchOpenContexts?: string + /** The current query phase operations. */ + 'search.query_current'?: string + /** The current query phase operations. + * @alias 'search.query_current' */ + sqc?: string + /** The current query phase operations. + * @alias 'search.query_current' */ + searchQueryCurrent?: string + /** The time spent in query phase. */ + 'search.query_time'?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ + sqti?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ + searchQueryTime?: string + /** The total number of query phase operations. */ + 'search.query_total'?: string + /** The total number of query phase operations. + * @alias 'search.query_total' */ + sqto?: string + /** The total number of query phase operations. + * @alias 'search.query_total' */ + searchQueryTotal?: string + /** The open scroll contexts. */ + 'search.scroll_current'?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ + scc?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ + searchScrollCurrent?: string + /** The time scroll contexts were held open. */ + 'search.scroll_time'?: string + /** The time scroll contexts were held open. + * @alias 'search.scroll_time' */ + scti?: string + /** The time scroll contexts were held open. + * @alias 'search.scroll_time' */ + searchScrollTime?: string + /** The number of completed scroll contexts. */ + 'search.scroll_total'?: string + /** The number of completed scroll contexts. + * @alias 'search.scroll_total' */ + scto?: string + /** The number of completed scroll contexts. + * @alias 'search.scroll_total' */ + searchScrollTotal?: string + /** The number of segments. */ + 'segments.count'?: string + /** The number of segments. + * @alias 'segments.count' */ + sc?: string + /** The number of segments. + * @alias 'segments.count' */ + segmentsCount?: string + /** The memory used by segments. */ + 'segments.memory'?: string + /** The memory used by segments. + * @alias 'segments.memory' */ + sm?: string + /** The memory used by segments. + * @alias 'segments.memory' */ + segmentsMemory?: string + /** The memory used by the index writer. */ + 'segments.index_writer_memory'?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ + siwm?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ + segmentsIndexWriterMemory?: string + /** The memory used by the version map. */ + 'segments.version_map_memory'?: string + /** The memory used by the version map. 
+ * @alias 'segments.version_map_memory' */ + svmm?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ + segmentsVersionMapMemory?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred to in `_parent` fields. */ + 'segments.fixed_bitset_memory'?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred to in `_parent` fields. + * @alias 'segments.fixed_bitset_memory' */ + sfbm?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred to in `_parent` fields. + * @alias 'segments.fixed_bitset_memory' */ + fixedBitsetMemory?: string + /** The maximum sequence number. */ + 'seq_no.max'?: string + /** The maximum sequence number. + * @alias 'seq_no.max' */ + sqm?: string + /** The maximum sequence number. + * @alias 'seq_no.max' */ + maxSeqNo?: string + /** The local checkpoint. */ + 'seq_no.local_checkpoint'?: string + /** The local checkpoint. + * @alias 'seq_no.local_checkpoint' */ + sql?: string + /** The local checkpoint. + * @alias 'seq_no.local_checkpoint' */ + localCheckpoint?: string + /** The global checkpoint. */ + 'seq_no.global_checkpoint'?: string + /** The global checkpoint. + * @alias 'seq_no.global_checkpoint' */ + sqg?: string + /** The global checkpoint. + * @alias 'seq_no.global_checkpoint' */ + globalCheckpoint?: string + /** The number of current warmer operations. */ + 'warmer.current'?: string + /** The number of current warmer operations. + * @alias 'warmer.current' */ + wc?: string + /** The number of current warmer operations. + * @alias 'warmer.current' */ + warmerCurrent?: string + /** The total number of warmer operations. */ + 'warmer.total'?: string + /** The total number of warmer operations. + * @alias 'warmer.total' */ + wto?: string + /** The total number of warmer operations. + * @alias 'warmer.total' */ + warmerTotal?: string + /** The time spent in warmer operations. */ + 'warmer.total_time'?: string + /** The time spent in warmer operations. + * @alias 'warmer.total_time' */ + wtt?: string + /** The time spent in warmer operations. + * @alias 'warmer.total_time' */ + warmerTotalTime?: string + /** The shard data path. */ + 'path.data'?: string + /** The shard data path. + * @alias 'path.data' */ + pd?: string + /** The shard data path. + * @alias 'path.data' */ + dataPath?: string + /** The shard state path. */ + 'path.state'?: string + /** The shard state path. + * @alias 'path.state' */ + ps?: string + /** The shard state path. + * @alias 'path.state' */ + statsPath?: string + /** The number of bulk shard operations. */ + 'bulk.total_operations'?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ + bto?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ + bulkTotalOperations?: string + /** The time spent in shard bulk operations. */ + 'bulk.total_time'?: string + /** The time spent in shard bulk operations. + * @alias 'bulk.total_time' */ + btti?: string + /** The time spent in shard bulk operations. + * @alias 'bulk.total_time' */ + bulkTotalTime?: string + /** The total size in bytes of shard bulk operations. */ + 'bulk.total_size_in_bytes'?: string + /** The total size in bytes of shard bulk operations. + * @alias 'bulk.total_size_in_bytes' */ + btsi?: string + /** The total size in bytes of shard bulk operations.
+ * @alias 'bulk.total_size_in_bytes' */ + bulkTotalSizeInBytes?: string + /** The average time spent in shard bulk operations. */ + 'bulk.avg_time'?: string + /** The average time spent in shard bulk operations. + * @alias 'bulk.avg_time' */ + bati?: string + /** The average time spent in shard bulk operations. + * @alias 'bulk.avg_time' */ + bulkAvgTime?: string + /** The average size in bytes of shard bulk operations. */ + 'bulk.avg_size_in_bytes'?: string + /** The average size in bytes of shard bulk operations. + * @alias 'bulk.avg_size_in_bytes' */ + basi?: string + /** The average size in bytes of shard bulk operations. + * @alias 'bulk.avg_size_in_bytes' */ + bulkAvgSizeInBytes?: string +} + +export interface CatSnapshotsRequest extends CatCatRequestBase { + /** A comma-separated list of snapshot repositories used to limit the request. + * Accepts wildcard expressions. + * `_all` returns all repositories. + * If any repository fails during the request, Elasticsearch returns an error. */ + repository?: Names + /** If `true`, the response does not include information from unavailable snapshots. */ + ignore_unavailable?: boolean + /** A comma-separated list of column names to display. + * It supports simple wildcards. */ + h?: CatCatSnapshotsColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, ignore_unavailable?: never, h?: never, s?: never, master_timeout?: never } +} + +export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[]
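A sketch using the shards record above to surface unassigned shards, with the same assumed `client`; because cat values arrive as strings, the state check is a string comparison:

```ts
const shards = await client.cat.shards({
  h: ['index', 'shard', 'prirep', 'state', 'unassigned.reason'],
})
for (const s of shards) {
  if (s.state === 'UNASSIGNED') {
    console.log(s.index, s.shard, s['unassigned.reason'])
  }
}
```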
+ +export interface CatSnapshotsSnapshotsRecord { + /** The unique identifier for the snapshot. */ + id?: string + /** The unique identifier for the snapshot. + * @alias id */ + snapshot?: string + /** The repository name. */ + repository?: string + /** The repository name. + * @alias repository */ + re?: string + /** The repository name. + * @alias repository */ + repo?: string + /** The state of the snapshot process. + * Returned values include: + * `FAILED`: The snapshot process failed. + * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version. + * `IN_PROGRESS`: The snapshot process started but has not completed. + * `PARTIAL`: The snapshot process completed with a partial success. + * `SUCCESS`: The snapshot process completed with a full success. */ + status?: string + /** The state of the snapshot process. + * Returned values include: + * `FAILED`: The snapshot process failed. + * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version. + * `IN_PROGRESS`: The snapshot process started but has not completed. + * `PARTIAL`: The snapshot process completed with a partial success. + * `SUCCESS`: The snapshot process completed with a full success. + * @alias status */ + s?: string + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. */ + start_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. + * @alias start_epoch */ + ste?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. + * @alias start_epoch */ + startEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The time (HH:MM:SS) at which the snapshot process started. */ + start_time?: WatcherScheduleTimeOfDay + /** The time (HH:MM:SS) at which the snapshot process started. + * @alias start_time */ + sti?: WatcherScheduleTimeOfDay + /** The time (HH:MM:SS) at which the snapshot process started. + * @alias start_time */ + startTime?: WatcherScheduleTimeOfDay + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. */ + end_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. + * @alias end_epoch */ + ete?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. + * @alias end_epoch */ + endEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The time (HH:MM:SS) at which the snapshot process ended. */ + end_time?: TimeOfDay + /** The time (HH:MM:SS) at which the snapshot process ended. + * @alias end_time */ + eti?: TimeOfDay + /** The time (HH:MM:SS) at which the snapshot process ended. + * @alias end_time */ + endTime?: TimeOfDay + /** The time it took the snapshot process to complete, in time units. */ + duration?: Duration + /** The time it took the snapshot process to complete, in time units. + * @alias duration */ + dur?: Duration + /** The number of indices in the snapshot. */ + indices?: string + /** The number of indices in the snapshot. + * @alias indices */ + i?: string + /** The number of successful shards in the snapshot. */ + successful_shards?: string + /** The number of successful shards in the snapshot. + * @alias successful_shards */ + ss?: string + /** The number of failed shards in the snapshot. */ + failed_shards?: string + /** The number of failed shards in the snapshot. + * @alias failed_shards */ + fs?: string + /** The total number of shards in the snapshot. */ + total_shards?: string + /** The total number of shards in the snapshot. + * @alias total_shards */ + ts?: string + /** The reason for any snapshot failures. */ + reason?: string + /** The reason for any snapshot failures. + * @alias reason */ + r?: string +}
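A sketch for the snapshots record just defined, reusing the earlier `client`; the repository name is a placeholder:

```ts
const snapshots = await client.cat.snapshots({
  repository: 'my-repo', // placeholder
  h: ['id', 'status', 'duration', 'total_shards', 'failed_shards'],
})
for (const s of snapshots) {
  if (s.status !== 'SUCCESS') {
    console.log(`snapshot ${s.id}: ${s.status} (${s.failed_shards} failed shards)`)
  }
}
```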
+ +export interface CatTasksRequest extends CatCatRequestBase { + /** The task action names, which are used to limit the response. */ + actions?: string[] + /** If `true`, the response includes detailed information about the running tasks. */ + detailed?: boolean + /** Unique node identifiers, which are used to limit the response. */ + nodes?: string[] + /** The parent task identifier, which is used to limit the response. */ + parent_task_id?: string + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatTasksColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** If `true`, the request blocks until the task has completed. */ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { actions?: never, detailed?: never, nodes?: never, parent_task_id?: never, h?: never, s?: never, timeout?: never, wait_for_completion?: never } +} + +export type CatTasksResponse = CatTasksTasksRecord[] + +export interface CatTasksTasksRecord { + /** The identifier of the task within the node. */ + id?: Id + /** The task action. */ + action?: string + /** The task action. + * @alias action */ + ac?: string + /** The unique task identifier. */ + task_id?: Id + /** The unique task identifier. + * @alias task_id */ + ti?: Id + /** The parent task identifier. */ + parent_task_id?: string + /** The parent task identifier. + * @alias parent_task_id */ + pti?: string + /** The task type. */ + type?: string + /** The task type. + * @alias type */ + ty?: string + /** The start time in milliseconds. */ + start_time?: string + /** The start time in milliseconds. + * @alias start_time */ + start?: string + /** The start time in `HH:MM:SS` format. */ + timestamp?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ + ts?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ + hms?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ + hhmmss?: string + /** The running time in nanoseconds. */ + running_time_ns?: string + /** The running time. */ + running_time?: string + /** The running time. + * @alias running_time */ + time?: string + /** The unique node identifier. */ + node_id?: NodeId + /** The unique node identifier. + * @alias node_id */ + ni?: NodeId + /** The IP address for the node. */ + ip?: string + /** The IP address for the node. + * @alias ip */ + i?: string + /** The bound transport port for the node. */ + port?: string + /** The bound transport port for the node. + * @alias port */ + po?: string + /** The node name. */ + node?: string + /** The node name. + * @alias node */ + n?: string + /** The Elasticsearch version. */ + version?: VersionString + /** The Elasticsearch version. + * @alias version */ + v?: VersionString + /** The X-Opaque-ID header. */ + x_opaque_id?: string + /** The X-Opaque-ID header. + * @alias x_opaque_id */ + x?: string + /** The task action description. */ + description?: string + /** The task action description. + * @alias description */ + desc?: string +} + +export interface CatTemplatesRequest extends CatCatRequestBase { + /** The name of the template to return. + * Accepts wildcard expressions. If omitted, all templates are returned. */ + name?: Name + /** A comma-separated list of column names to display. It supports simple wildcards. */ + h?: CatCatTemplatesColumns + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node.
*/ + local?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, h?: never, s?: never, local?: never, master_timeout?: never } +} + +export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] + +export interface CatTemplatesTemplatesRecord { + /** The template name. */ + name?: Name + /** The template name. + * @alias name */ + n?: Name + /** The template index patterns. */ + index_patterns?: string + /** The template index patterns. + * @alias index_patterns */ + t?: string + /** The template application order or priority number. */ + order?: string + /** The template application order or priority number. + * @alias order */ + o?: string + /** The template application order or priority number. + * @alias order */ + p?: string + /** The template version. */ + version?: VersionString | null + /** The template version. + * @alias version */ + v?: VersionString | null + /** The component templates that comprise the index template. */ + composed_of?: string + /** The component templates that comprise the index template. + * @alias composed_of */ + c?: string +} + +export interface CatThreadPoolRequest extends CatCatRequestBase { + /** A comma-separated list of thread pool names used to limit the request. + * Accepts wildcard expressions. */ + thread_pool_patterns?: Names + /** List of columns to appear in the response. Supports simple wildcards. */ + h?: CatCatThreadPoolColumns + /** A comma-separated list of column names or aliases that determines the sort order. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ + s?: Names + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ + local?: boolean + /** The period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { thread_pool_patterns?: never, h?: never, s?: never, local?: never, master_timeout?: never } +} + +export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] + +export interface CatThreadPoolThreadPoolRecord { + /** The node name. */ + node_name?: string + /** The node name. + * @alias node_name */ + nn?: string + /** The persistent node identifier. */ + node_id?: NodeId + /** The persistent node identifier. + * @alias node_id */ + id?: NodeId + /** The ephemeral node identifier. */ + ephemeral_node_id?: string + /** The ephemeral node identifier. + * @alias ephemeral_node_id */ + eid?: string + /** The process identifier. */ + pid?: string + /** The process identifier. + * @alias pid */ + p?: string + /** The host name for the current node. */ + host?: string + /** The host name for the current node. 
+ * @alias host */ + h?: string + /** The IP address for the current node. */ + ip?: string + /** The IP address for the current node. + * @alias ip */ + i?: string + /** The bound transport port for the current node. */ + port?: string + /** The bound transport port for the current node. + * @alias port */ + po?: string + /** The thread pool name. */ + name?: string + /** The thread pool name. + * @alias name */ + n?: string + /** The thread pool type. + * Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`. */ + type?: string + /** The thread pool type. + * Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`. + * @alias type */ + t?: string + /** The number of active threads in the current thread pool. */ + active?: string + /** The number of active threads in the current thread pool. + * @alias active */ + a?: string + /** The number of threads in the current thread pool. */ + pool_size?: string + /** The number of threads in the current thread pool. + * @alias pool_size */ + psz?: string + /** The number of tasks currently in the queue. */ + queue?: string + /** The number of tasks currently in the queue. + * @alias queue */ + q?: string + /** The maximum number of tasks permitted in the queue. */ + queue_size?: string + /** The maximum number of tasks permitted in the queue. + * @alias queue_size */ + qs?: string + /** The number of rejected tasks. */ + rejected?: string + /** The number of rejected tasks. + * @alias rejected */ + r?: string + /** The highest number of active threads in the current thread pool. */ + largest?: string + /** The highest number of active threads in the current thread pool. + * @alias largest */ + l?: string + /** The number of completed tasks. */ + completed?: string + /** The number of completed tasks. + * @alias completed */ + c?: string + /** The core number of active threads allowed in a scaling thread pool. */ + core?: string | null + /** The core number of active threads allowed in a scaling thread pool. + * @alias core */ + cr?: string | null + /** The maximum number of active threads allowed in a scaling thread pool. */ + max?: string | null + /** The maximum number of active threads allowed in a scaling thread pool. + * @alias max */ + mx?: string | null + /** The number of active threads allowed in a fixed thread pool. */ + size?: string | null + /** The number of active threads allowed in a fixed thread pool. + * @alias size */ + sz?: string | null + /** The thread keep alive time. */ + keep_alive?: string | null + /** The thread keep alive time. + * @alias keep_alive */ + ka?: string | null +}
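To close out the thread pool record above (and the tasks API defined earlier), a monitoring-flavoured sketch with the same assumed `client`; the sort column for tasks is an assumption based on the documented cat columns:

```ts
// Thread pools with rejections; all counters are strings in cat records
const pools = await client.cat.threadPool({
  h: ['node_name', 'name', 'active', 'queue', 'rejected'],
})
for (const p of pools) {
  if (p.rejected !== '0') console.log(`${p.node_name}/${p.name}: ${p.rejected} rejected`)
}

// Longest-running tasks first (CatTasksTasksRecord[])
const tasks = await client.cat.tasks({ detailed: true, s: 'running_time:desc' })
```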
*/ + h?: CatCatTransformColumns + /** Comma-separated list of column names or column aliases used to sort the response. */ + s?: CatCatTransformColumns + /** The maximum number of transforms to obtain. */ + size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, h?: never, s?: never, size?: never } +} + +export type CatTransformsResponse = CatTransformsTransformsRecord[] + +export interface CatTransformsTransformsRecord { + /** The transform identifier. */ + id?: Id + /** The status of the transform. + * Returned values include: + * `aborting`: The transform is aborting. + * `failed`: The transform failed. For more information about the failure, check the `reason` field. + * `indexing`: The transform is actively processing data and creating new documents. + * `started`: The transform is running but not actively indexing data. + * `stopped`: The transform is stopped. + * `stopping`: The transform is stopping. */ + state?: string + /** The status of the transform. + * Returned values include: + * `aborting`: The transform is aborting. + * `failed`: The transform failed. For more information about the failure, check the `reason` field. + * `indexing`: The transform is actively processing data and creating new documents. + * `started`: The transform is running but not actively indexing data. + * `stopped`: The transform is stopped. + * `stopping`: The transform is stopping. + * @alias state */ + s?: string + /** The sequence number for the checkpoint. */ + checkpoint?: string + /** The sequence number for the checkpoint. + * @alias checkpoint */ + c?: string + /** The number of documents that have been processed from the source index of the transform. */ + documents_processed?: string + /** The number of documents that have been processed from the source index of the transform. + * @alias documents_processed */ + docp?: string + /** The number of documents that have been processed from the source index of the transform. + * @alias documents_processed */ + documentsProcessed?: string + /** The progress of the next checkpoint that is currently in progress. */ + checkpoint_progress?: string | null + /** The progress of the next checkpoint that is currently in progress. + * @alias checkpoint_progress */ + cp?: string | null + /** The progress of the next checkpoint that is currently in progress. + * @alias checkpoint_progress */ + checkpointProgress?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. */ + last_search_time?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. + * @alias last_search_time */ + lst?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. + * @alias last_search_time */ + lastSearchTime?: string | null + /** The timestamp when changes were last detected in the source indices. */ + changes_last_detection_time?: string | null + /** The timestamp when changes were last detected in the source indices.
+ * @alias changes_last_detection_time */ + cldt?: string | null + /** The time the transform was created. */ + create_time?: string + /** The time the transform was created. + * @alias create_time */ + ct?: string + /** The time the transform was created. + * @alias create_time */ + createTime?: string + /** The version of Elasticsearch that existed on the node when the transform was created. */ + version?: VersionString + /** The version of Elasticsearch that existed on the node when the transform was created. + * @alias version */ + v?: VersionString + /** The source indices for the transform. */ + source_index?: string + /** The source indices for the transform. + * @alias source_index */ + si?: string + /** The source indices for the transform. + * @alias source_index */ + sourceIndex?: string + /** The destination index for the transform. */ + dest_index?: string + /** The destination index for the transform. + * @alias dest_index */ + di?: string + /** The destination index for the transform. + * @alias dest_index */ + destIndex?: string + /** The unique identifier for the ingest pipeline. */ + pipeline?: string + /** The unique identifier for the ingest pipeline. + * @alias pipeline */ + p?: string + /** The description of the transform. */ + description?: string + /** The description of the transform. + * @alias description */ + d?: string + /** The type of transform: `batch` or `continuous`. */ + transform_type?: string + /** The type of transform: `batch` or `continuous`. + * @alias transform_type */ + tt?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. */ + frequency?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. + * @alias frequency */ + f?: string + /** The initial page size that is used for the composite aggregation for each checkpoint. */ + max_page_search_size?: string + /** The initial page size that is used for the composite aggregation for each checkpoint. + * @alias max_page_search_size */ + mpsz?: string + /** The number of input documents per second. */ + docs_per_second?: string + /** The number of input documents per second. + * @alias docs_per_second */ + dps?: string + /** If a transform has a `failed` state, these details describe the reason for failure. */ + reason?: string + /** If a transform has a `failed` state, these details describe the reason for failure. + * @alias reason */ + r?: string + /** The total number of search operations on the source index for the transform. */ + search_total?: string + /** The total number of search operations on the source index for the transform. + * @alias search_total */ + st?: string + /** The total number of search failures. */ + search_failure?: string + /** The total number of search failures. + * @alias search_failure */ + sf?: string + /** The total amount of search time, in milliseconds. */ + search_time?: string + /** The total amount of search time, in milliseconds. + * @alias search_time */ + stime?: string + /** The total number of index operations done by the transform. */ + index_total?: string + /** The total number of index operations done by the transform. + * @alias index_total */ + it?: string + /** The total number of indexing failures. */ + index_failure?: string + /** The total number of indexing failures. + * @alias index_failure */ + if?: string + /** The total time spent indexing documents, in milliseconds. 
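+ *
+ * @example
+ * A minimal sketch of listing transforms with the request defined above
+ * (the transform id pattern is hypothetical; assumes a configured `client`):
+ * ```ts
+ * const transforms = await client.cat.transforms({
+ *   transform_id: 'ecommerce-*', // hypothetical wildcard expression
+ *   h: ['id', 'state', 'checkpoint', 'documents_processed'],
+ *   allow_no_match: true, // empty array rather than a 404 when nothing matches
+ * })
+ * ```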
*/ + index_time?: string + /** The total time spent indexing documents, in milliseconds. + * @alias index_time */ + itime?: string + /** The number of documents that have been indexed into the destination index for the transform. */ + documents_indexed?: string + /** The number of documents that have been indexed into the destination index for the transform. + * @alias documents_indexed */ + doci?: string + /** The total time spent deleting documents, in milliseconds. */ + delete_time?: string + /** The total time spent deleting documents, in milliseconds. + * @alias delete_time */ + dtime?: string + /** The number of documents deleted from the destination index due to the retention policy for the transform. */ + documents_deleted?: string + /** The number of documents deleted from the destination index due to the retention policy for the transform. + * @alias documents_deleted */ + docd?: string + /** The number of times the transform has been triggered by the scheduler. + * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. */ + trigger_count?: string + /** The number of times the transform has been triggered by the scheduler. + * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. + * @alias trigger_count */ + tc?: string + /** The number of search or bulk index operations processed. + * Documents are processed in batches instead of individually. */ + pages_processed?: string + /** The number of search or bulk index operations processed. + * Documents are processed in batches instead of individually. + * @alias pages_processed */ + pp?: string + /** The total time spent processing results, in milliseconds. */ + processing_time?: string + /** The total time spent processing results, in milliseconds. + * @alias processing_time */ + pt?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. */ + checkpoint_duration_time_exp_avg?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. + * @alias checkpoint_duration_time_exp_avg */ + cdtea?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. + * @alias checkpoint_duration_time_exp_avg */ + checkpointTimeExpAvg?: string + /** The exponential moving average of the number of new documents that have been indexed. */ + indexed_documents_exp_avg?: string + /** The exponential moving average of the number of new documents that have been indexed. + * @alias indexed_documents_exp_avg */ + idea?: string + /** The exponential moving average of the number of documents that have been processed. */ + processed_documents_exp_avg?: string + /** The exponential moving average of the number of documents that have been processed. + * @alias processed_documents_exp_avg */ + pdea?: string +} + +export interface CcrFollowIndexStats { + /** The name of the follower index. */ + index: IndexName + /** An array of shard-level following task statistics. */ + shards: CcrShardStats[] +} + +export interface CcrReadException { + /** The exception that caused the read to fail. */ + exception: ErrorCause + /** The starting sequence number of the batch requested from the leader. */ + from_seq_no: SequenceNumber + /** The number of times the batch has been retried. 
*/ + retries: integer +} + +export interface CcrShardStats { + /** The total of transferred bytes read from the leader. + * This is only an estimate and does not account for compression if enabled. */ + bytes_read: long + /** The number of failed reads. */ + failed_read_requests: long + /** The number of failed bulk write requests on the follower. */ + failed_write_requests: long + fatal_exception?: ErrorCause + /** The index aliases version the follower is synced up to. */ + follower_aliases_version: VersionNumber + /** The current global checkpoint on the follower. + * The difference between the `leader_global_checkpoint` and the `follower_global_checkpoint` is an indication of how much the follower is lagging the leader. */ + follower_global_checkpoint: long + /** The name of the follower index. */ + follower_index: string + /** The mapping version the follower is synced up to. */ + follower_mapping_version: VersionNumber + /** The current maximum sequence number on the follower. */ + follower_max_seq_no: SequenceNumber + /** The index settings version the follower is synced up to. */ + follower_settings_version: VersionNumber + /** The starting sequence number of the last batch of operations requested from the leader. */ + last_requested_seq_no: SequenceNumber + /** The current global checkpoint on the leader known to the follower task. */ + leader_global_checkpoint: long + /** The name of the index in the leader cluster being followed. */ + leader_index: string + /** The current maximum sequence number on the leader known to the follower task. */ + leader_max_seq_no: SequenceNumber + /** The total number of operations read from the leader. */ + operations_read: long + /** The number of operations written on the follower. */ + operations_written: long + /** The number of active read requests from the follower. */ + outstanding_read_requests: integer + /** The number of active bulk write requests on the follower. */ + outstanding_write_requests: integer + /** An array of objects representing failed reads. */ + read_exceptions: CcrReadException[] + /** The remote cluster containing the leader index. */ + remote_cluster: string + /** The numerical shard ID, with values from 0 to one less than the number of replicas. */ + shard_id: integer + /** The number of successful fetches. */ + successful_read_requests: long + /** The number of bulk write requests run on the follower. */ + successful_write_requests: long + time_since_last_read?: Duration + /** The number of milliseconds since a read request was sent to the leader. + * When the follower is caught up to the leader, this number will increase up to the configured `read_poll_timeout` at which point another read request will be sent to the leader. */ + time_since_last_read_millis: DurationValue + total_read_remote_exec_time?: Duration + /** The total time reads spent running on the remote cluster. */ + total_read_remote_exec_time_millis: DurationValue + total_read_time?: Duration + /** The total time reads were outstanding, measured from the time a read was sent to the leader to the time a reply was returned to the follower. */ + total_read_time_millis: DurationValue + total_write_time?: Duration + /** The total time spent writing on the follower. */ + total_write_time_millis: DurationValue + /** The number of write operations queued on the follower. */ + write_buffer_operation_count: long + /** The total number of bytes of operations currently queued for writing. 
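+ *
+ * @example
+ * A sketch of how these shard-level statistics are commonly read (the follower
+ * index name is hypothetical; assumes a configured `client`):
+ * ```ts
+ * const stats = await client.ccr.followStats({ index: 'follower-index' })
+ * for (const index of stats.indices) {
+ *   for (const shard of index.shards) {
+ *     // a rough lag indicator: leader vs. follower global checkpoint
+ *     const lag = shard.leader_global_checkpoint - shard.follower_global_checkpoint
+ *     console.log(index.index, shard.shard_id, lag)
+ *   }
+ * }
+ * ```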
*/ + write_buffer_size_in_bytes: ByteSize +} + +export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { + /** The auto-follow pattern collection to delete. */ + name: Name + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase + +export interface CcrFollowRequest extends RequestBase { + /** The name of the follower index. */ + index: IndexName + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be + * active. + * A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the + * remote Lucene segment files to the follower index. */ + wait_for_active_shards?: WaitForActiveShards + /** If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. */ + data_stream_name?: string + /** The name of the index in the leader cluster to follow. */ + leader_index: IndexName + /** The maximum number of outstanding reads requests from the remote cluster. */ + max_outstanding_read_requests?: long + /** The maximum number of outstanding write requests on the follower. */ + max_outstanding_write_requests?: integer + /** The maximum number of operations to pull per read from the remote cluster. */ + max_read_request_operation_count?: integer + /** The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. */ + max_read_request_size?: ByteSize + /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when + * retrying. */ + max_retry_delay?: Duration + /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be + * deferred until the number of queued operations goes below the limit. */ + max_write_buffer_count?: integer + /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will + * be deferred until the total bytes of queued operations goes below the limit. */ + max_write_buffer_size?: ByteSize + /** The maximum number of operations per bulk write request executed on the follower. */ + max_write_request_operation_count?: integer + /** The maximum total bytes of operations per bulk write request executed on the follower. */ + max_write_request_size?: ByteSize + /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. + * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. + * Then the follower will immediately attempt to read from the leader again. 
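+ *
+ * @example
+ * A minimal sketch of creating a follower index with this request (the cluster
+ * alias and index names are hypothetical; assumes a configured `client`):
+ * ```ts
+ * const resp = await client.ccr.follow({
+ *   index: 'follower-index',
+ *   remote_cluster: 'remote-cluster', // must already be registered on this cluster
+ *   leader_index: 'leader-index',
+ *   wait_for_active_shards: 1,
+ * })
+ * console.log(resp.follow_index_created, resp.index_following_started)
+ * ```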
*/ + read_poll_timeout?: Duration + /** The remote cluster containing the leader index. */ + remote_cluster: string + /** Settings to override from the leader index. */ + settings?: IndicesIndexSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, wait_for_active_shards?: never, data_stream_name?: never, leader_index?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never, remote_cluster?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, wait_for_active_shards?: never, data_stream_name?: never, leader_index?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never, remote_cluster?: never, settings?: never } +} + +export interface CcrFollowResponse { + follow_index_created: boolean + follow_index_shards_acked: boolean + index_following_started: boolean +} + +export interface CcrFollowInfoFollowerIndex { + /** The name of the follower index. */ + follower_index: IndexName + /** The name of the index in the leader cluster that is followed. */ + leader_index: IndexName + /** An object that encapsulates cross-cluster replication parameters. If the follower index's status is paused, this object is omitted. */ + parameters?: CcrFollowInfoFollowerIndexParameters + /** The remote cluster that contains the leader index. */ + remote_cluster: Name + /** The status of the index following: `active` or `paused`. */ + status: CcrFollowInfoFollowerIndexStatus +} + +export interface CcrFollowInfoFollowerIndexParameters { + /** The maximum number of outstanding reads requests from the remote cluster. */ + max_outstanding_read_requests?: long + /** The maximum number of outstanding write requests on the follower. */ + max_outstanding_write_requests?: integer + /** The maximum number of operations to pull per read from the remote cluster. */ + max_read_request_operation_count?: integer + /** The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. */ + max_read_request_size?: ByteSize + /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when + * retrying. */ + max_retry_delay?: Duration + /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be + * deferred until the number of queued operations goes below the limit. */ + max_write_buffer_count?: integer + /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will + * be deferred until the total bytes of queued operations goes below the limit. */ + max_write_buffer_size?: ByteSize + /** The maximum number of operations per bulk write request executed on the follower. 
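+ *
+ * @example
+ * A sketch of inspecting these parameters via the follow info API (the index
+ * pattern is hypothetical; assumes a configured `client`):
+ * ```ts
+ * const info = await client.ccr.followInfo({ index: 'follower-*' })
+ * for (const follower of info.follower_indices) {
+ *   // `parameters` is omitted while the follower is paused
+ *   console.log(follower.follower_index, follower.status, follower.parameters?.read_poll_timeout)
+ * }
+ * ```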
*/ + max_write_request_operation_count?: integer + /** The maximum total bytes of operations per bulk write request executed on the follower. */ + max_write_request_size?: ByteSize + /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. + * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. + * Then the follower will immediately attempt to read from the leader again. */ + read_poll_timeout?: Duration +} + +export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' + +export interface CcrFollowInfoRequest extends RequestBase { + /** A comma-delimited list of follower index patterns. */ + index: Indices + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never } +} + +export interface CcrFollowInfoResponse { + follower_indices: CcrFollowInfoFollowerIndex[] +} + +export interface CcrFollowStatsRequest extends RequestBase { + /** A comma-delimited list of index patterns. */ + index: Indices + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, timeout?: never } +} + +export interface CcrFollowStatsResponse { + /** An array of follower index statistics. */ + indices: CcrFollowIndexStats[] +} + +export interface CcrForgetFollowerRequest extends RequestBase { + /** the name of the leader index for which specified follower retention leases should be removed */ + index: IndexName + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + follower_cluster?: string + follower_index?: IndexName + follower_index_uuid?: Uuid + leader_remote_cluster?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, timeout?: never, follower_cluster?: never, follower_index?: never, follower_index_uuid?: never, leader_remote_cluster?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, timeout?: never, follower_cluster?: never, follower_index?: never, follower_index_uuid?: never, leader_remote_cluster?: never } +} + +export interface CcrForgetFollowerResponse { + _shards: ShardStatistics +} + +export interface CcrGetAutoFollowPatternAutoFollowPattern { + name: Name + pattern: CcrGetAutoFollowPatternAutoFollowPatternSummary +} + +export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { + active: boolean + /** The remote cluster containing the leader indices to match against. 
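+ *
+ * @example
+ * A minimal sketch of fetching these summaries (assumes a configured `client`;
+ * omitting `name` returns all pattern collections):
+ * ```ts
+ * const { patterns } = await client.ccr.getAutoFollowPattern()
+ * for (const p of patterns) {
+ *   console.log(p.name, p.pattern.active, p.pattern.leader_index_patterns)
+ * }
+ * ```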
*/ + remote_cluster: string + /** The name of follower index. */ + follow_index_pattern?: IndexPattern + /** An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. */ + leader_index_patterns: IndexPatterns + /** An array of simple index patterns that can be used to exclude indices from being auto-followed. */ + leader_index_exclusion_patterns: IndexPatterns + /** The maximum number of outstanding reads requests from the remote cluster. */ + max_outstanding_read_requests: integer +} + +export interface CcrGetAutoFollowPatternRequest extends RequestBase { + /** The auto-follow pattern collection that you want to retrieve. + * If you do not specify a name, the API returns information for all collections. */ + name?: Name + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export interface CcrGetAutoFollowPatternResponse { + patterns: CcrGetAutoFollowPatternAutoFollowPattern[] +} + +export interface CcrPauseAutoFollowPatternRequest extends RequestBase { + /** The name of the auto-follow pattern to pause. */ + name: Name + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase + +export interface CcrPauseFollowRequest extends RequestBase { + /** The name of the follower index. */ + index: IndexName + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never } +} + +export type CcrPauseFollowResponse = AcknowledgedResponseBase + +export interface CcrPutAutoFollowPatternRequest extends RequestBase { + /** The name of the collection of auto-follow patterns. */ + name: Name + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** The remote cluster containing the leader indices to match against. */ + remote_cluster: string + /** The name of follower index. 
The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices. */ + follow_index_pattern?: IndexPattern + /** An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. */ + leader_index_patterns?: IndexPatterns + /** An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed. */ + leader_index_exclusion_patterns?: IndexPatterns + /** The maximum number of outstanding reads requests from the remote cluster. */ + max_outstanding_read_requests?: integer + /** Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards). */ + settings?: Record + /** The maximum number of outstanding write requests on the follower. */ + max_outstanding_write_requests?: integer + /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. */ + read_poll_timeout?: Duration + /** The maximum number of operations to pull per read from the remote cluster. */ + max_read_request_operation_count?: integer + /** The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. */ + max_read_request_size?: ByteSize + /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. */ + max_retry_delay?: Duration + /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. */ + max_write_buffer_count?: integer + /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. */ + max_write_buffer_size?: ByteSize + /** The maximum number of operations per bulk write request executed on the follower. */ + max_write_request_operation_count?: integer + /** The maximum total bytes of operations per bulk write request executed on the follower. */ + max_write_request_size?: ByteSize + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, remote_cluster?: never, follow_index_pattern?: never, leader_index_patterns?: never, leader_index_exclusion_patterns?: never, max_outstanding_read_requests?: never, settings?: never, max_outstanding_write_requests?: never, read_poll_timeout?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never } + /** All values in `querystring` will be added to the request querystring.
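+ *
+ * @example
+ * A minimal sketch of registering a pattern collection with this request (the
+ * cluster alias and patterns are hypothetical; assumes a configured `client`):
+ * ```ts
+ * await client.ccr.putAutoFollowPattern({
+ *   name: 'logs-pattern',
+ *   remote_cluster: 'remote-cluster',
+ *   leader_index_patterns: ['logs-*'],
+ *   leader_index_exclusion_patterns: ['logs-tmp-*'],
+ *   follow_index_pattern: '{{leader_index}}-follower',
+ * })
+ * ```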
*/ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, remote_cluster?: never, follow_index_pattern?: never, leader_index_patterns?: never, leader_index_exclusion_patterns?: never, max_outstanding_read_requests?: never, settings?: never, max_outstanding_write_requests?: never, read_poll_timeout?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never } +} + +export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase + +export interface CcrResumeAutoFollowPatternRequest extends RequestBase { + /** The name of the auto-follow pattern to resume. */ + name: Name + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase + +export interface CcrResumeFollowRequest extends RequestBase { + /** The name of the follow index to resume following. */ + index: IndexName + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + max_outstanding_read_requests?: long + max_outstanding_write_requests?: long + max_read_request_operation_count?: long + max_read_request_size?: string + max_retry_delay?: Duration + max_write_buffer_count?: long + max_write_buffer_size?: string + max_write_request_operation_count?: long + max_write_request_size?: string + read_poll_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, max_outstanding_read_requests?: never, max_outstanding_write_requests?: never, max_read_request_operation_count?: never, max_read_request_size?: never, max_retry_delay?: never, max_write_buffer_count?: never, max_write_buffer_size?: never, max_write_request_operation_count?: never, max_write_request_size?: never, read_poll_timeout?: never } +} + +export type CcrResumeFollowResponse = AcknowledgedResponseBase + +export interface CcrStatsAutoFollowStats { + auto_followed_clusters: CcrStatsAutoFollowedCluster[] + /** The number of indices that the auto-follow coordinator failed to automatically follow. + * The causes of recent failures are captured in the logs of the elected master node and in the `auto_follow_stats.recent_auto_follow_errors` field. 
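+ *
+ * @example
+ * A sketch of reading these counters (assumes a configured `client`):
+ * ```ts
+ * const stats = await client.ccr.stats()
+ * const autoFollow = stats.auto_follow_stats
+ * console.log(autoFollow.number_of_failed_follow_indices, autoFollow.recent_auto_follow_errors.length)
+ * ```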
*/ + number_of_failed_follow_indices: long + /** The number of times that the auto-follow coordinator failed to retrieve the cluster state from a remote cluster registered in a collection of auto-follow patterns. */ + number_of_failed_remote_cluster_state_requests: long + /** The number of indices that the auto-follow coordinator successfully followed. */ + number_of_successful_follow_indices: long + /** An array of objects representing failures by the auto-follow coordinator. */ + recent_auto_follow_errors: ErrorCause[] +} + +export interface CcrStatsAutoFollowedCluster { + cluster_name: Name + last_seen_metadata_version: VersionNumber + time_since_last_check_millis: DurationValue +} + +export interface CcrStatsFollowStats { + indices: CcrFollowIndexStats[] +} + +export interface CcrStatsRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export interface CcrStatsResponse { + /** Statistics for the auto-follow coordinator. */ + auto_follow_stats: CcrStatsAutoFollowStats + /** Shard-level statistics for follower indices. */ + follow_stats: CcrStatsFollowStats +} + +export interface CcrUnfollowRequest extends RequestBase { + /** The name of the follower index. */ + index: IndexName + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never } +} + +export type CcrUnfollowResponse = AcknowledgedResponseBase + +export interface ClusterComponentTemplate { + name: Name + component_template: ClusterComponentTemplateNode +} + +export interface ClusterComponentTemplateNode { + template: ClusterComponentTemplateSummary + version?: VersionNumber + _meta?: Metadata + deprecated?: boolean + /** Date and time when the component template was created. Only returned if the `human` query parameter is `true`. */ + created_date?: DateTime + /** Date and time when the component template was created, in milliseconds since the epoch. */ + created_date_millis?: EpochTime + /** Date and time when the component template was last modified. Only returned if the `human` query parameter is `true`. */ + modified_date?: DateTime + /** Date and time when the component template was last modified, in milliseconds since the epoch. 
*/ + modified_date_millis?: EpochTime +} + +export interface ClusterComponentTemplateSummary { + _meta?: Metadata + version?: VersionNumber + settings?: Record + mappings?: MappingTypeMapping + aliases?: Record + lifecycle?: IndicesDataStreamLifecycleWithRollover + data_stream_options?: IndicesDataStreamOptionsTemplate | null +} + +export interface ClusterAllocationExplainAllocationDecision { + decider: string + decision: ClusterAllocationExplainAllocationExplainDecision + explanation: string +} + +export type ClusterAllocationExplainAllocationExplainDecision = 'NO' | 'YES' | 'THROTTLE' | 'ALWAYS' + +export interface ClusterAllocationExplainAllocationStore { + allocation_id: string + found: boolean + in_sync: boolean + matching_size_in_bytes: long + matching_sync_id: boolean + store_exception: string +} + +export interface ClusterAllocationExplainClusterInfo { + nodes: Record + shard_sizes: Record + shard_data_set_sizes?: Record + shard_paths: Record + reserved_sizes: ClusterAllocationExplainReservedSize[] +} + +export interface ClusterAllocationExplainCurrentNode { + id: Id + name: Name + roles: NodeRoles + attributes: Record + transport_address: TransportAddress + weight_ranking: integer +} + +export type ClusterAllocationExplainDecision = 'yes' | 'no' | 'worse_balance' | 'throttled' | 'awaiting_info' | 'allocation_delayed' | 'no_valid_shard_copy' | 'no_attempt' + +export interface ClusterAllocationExplainDiskUsage { + path: string + total_bytes: long + used_bytes: long + free_bytes: long + free_disk_percent: double + used_disk_percent: double +} + +export interface ClusterAllocationExplainNodeAllocationExplanation { + deciders?: ClusterAllocationExplainAllocationDecision[] + node_attributes: Record + node_decision: ClusterAllocationExplainDecision + node_id: Id + node_name: Name + roles: NodeRoles + store?: ClusterAllocationExplainAllocationStore + transport_address: TransportAddress + weight_ranking?: integer +} + +export interface ClusterAllocationExplainNodeDiskUsage { + node_name: Name + least_available: ClusterAllocationExplainDiskUsage + most_available: ClusterAllocationExplainDiskUsage +} + +export interface ClusterAllocationExplainRequest extends RequestBase { + /** If true, returns information about disk usage and shard sizes. */ + include_disk_info?: boolean + /** If true, returns YES decisions in explanation. */ + include_yes_decisions?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** The name of the index that you would like an explanation for. */ + index?: IndexName + /** An identifier for the shard that you would like an explanation for. */ + shard?: integer + /** If true, returns an explanation for the primary shard for the specified shard ID. */ + primary?: boolean + /** Explain a shard only if it is currently located on the specified node name or node ID. */ + current_node?: NodeId + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, index?: never, shard?: never, primary?: never, current_node?: never } + /** All values in `querystring` will be added to the request querystring. 
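+ *
+ * @example
+ * A minimal sketch of asking why a specific shard is allocated where it is
+ * (the index name is hypothetical; assumes a configured `client`):
+ * ```ts
+ * const explain = await client.cluster.allocationExplain({
+ *   index: 'my-index',
+ *   shard: 0,
+ *   primary: true,
+ *   include_yes_decisions: false,
+ * })
+ * console.log(explain.can_allocate, explain.allocate_explanation)
+ * ```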
*/ + querystring?: { [key: string]: any } & { include_disk_info?: never, include_yes_decisions?: never, master_timeout?: never, index?: never, shard?: never, primary?: never, current_node?: never } +} + +export interface ClusterAllocationExplainReservedSize { + node_id: Id + path: string + total: long + shards: string[] +} + +export interface ClusterAllocationExplainResponse { + allocate_explanation?: string + allocation_delay?: Duration + allocation_delay_in_millis?: DurationValue + can_allocate?: ClusterAllocationExplainDecision + can_move_to_other_node?: ClusterAllocationExplainDecision + can_rebalance_cluster?: ClusterAllocationExplainDecision + can_rebalance_cluster_decisions?: ClusterAllocationExplainAllocationDecision[] + can_rebalance_to_other_node?: ClusterAllocationExplainDecision + can_remain_decisions?: ClusterAllocationExplainAllocationDecision[] + can_remain_on_current_node?: ClusterAllocationExplainDecision + cluster_info?: ClusterAllocationExplainClusterInfo + configured_delay?: Duration + configured_delay_in_millis?: DurationValue + current_node?: ClusterAllocationExplainCurrentNode + current_state: string + index: IndexName + move_explanation?: string + node_allocation_decisions?: ClusterAllocationExplainNodeAllocationExplanation[] + primary: boolean + rebalance_explanation?: string + remaining_delay?: Duration + remaining_delay_in_millis?: DurationValue + shard: integer + unassigned_info?: ClusterAllocationExplainUnassignedInformation + note?: string +} + +export interface ClusterAllocationExplainUnassignedInformation { + at: DateTime + last_allocation_status?: string + reason: ClusterAllocationExplainUnassignedInformationReason + details?: string + failed_allocation_attempts?: integer + delayed?: boolean + allocation_status?: string +} + +export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' + +export interface ClusterDeleteComponentTemplateRequest extends RequestBase { + /** Comma-separated list or wildcard expression of component template names used to limit the request. */ + name: Names + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } +} + +export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase + +export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** Specifies whether to wait for all excluded nodes to be removed from the + * cluster before clearing the voting configuration exclusions list. 
+ * Defaults to true, meaning that all excluded nodes must be removed from + * the cluster before this API takes any action. If set to false then the + * voting configuration exclusions list is cleared even if some excluded + * nodes are still in the cluster. */ + wait_for_removal?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, wait_for_removal?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, wait_for_removal?: never } +} + +export type ClusterDeleteVotingConfigExclusionsResponse = boolean + +export interface ClusterExistsComponentTemplateRequest extends RequestBase { + /** Comma-separated list of component template names used to limit the request. + * Wildcard (*) expressions are supported. */ + name: Names + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** If true, the request retrieves information from the local node only. + * Defaults to false, which means information is retrieved from the master node. */ + local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, local?: never } +} + +export type ClusterExistsComponentTemplateResponse = boolean + +export interface ClusterGetComponentTemplateRequest extends RequestBase { + /** Comma-separated list of component template names used to limit the request. + * Wildcard (`*`) expressions are supported. */ + name?: Name + /** If `true`, returns settings in flat format. */ + flat_settings?: boolean + /** Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys */ + settings_filter?: string | string[] + /** Return all default configurations for the component template (default: false) */ + include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. + * If `false`, information is retrieved from the master node. */ + local?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, settings_filter?: never, include_defaults?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, settings_filter?: never, include_defaults?: never, local?: never, master_timeout?: never } +} + +export interface ClusterGetComponentTemplateResponse { + component_templates: ClusterComponentTemplate[] +} + +export interface ClusterGetSettingsRequest extends RequestBase { + /** If `true`, returns settings in flat format. */ + flat_settings?: boolean + /** If `true`, also returns default values for all other cluster settings, reflecting the values + * in the `elasticsearch.yml` file of one of the nodes in the cluster. 
If the nodes in your + * cluster do not all have the same values in their `elasticsearch.yml` config files then the + * values returned by this API may vary from invocation to invocation and may not reflect the + * values that Elasticsearch uses in all situations. Use the `GET _nodes/settings` API to + * fetch the settings for each individual node in your cluster. */ + include_defaults?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { flat_settings?: never, include_defaults?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { flat_settings?: never, include_defaults?: never, master_timeout?: never, timeout?: never } +} + +export interface ClusterGetSettingsResponse { + /** The settings that persist after the cluster restarts. */ + persistent: Record + /** The settings that do not persist after the cluster restarts. */ + transient: Record + /** The default setting values. */ + defaults?: Record +} + +export interface ClusterHealthHealthResponseBody { + /** The number of active primary shards. */ + active_primary_shards: integer + /** The total number of active primary and replica shards. */ + active_shards: integer + /** The ratio of active shards in the cluster expressed as a string formatted percentage. */ + active_shards_percent?: string + /** The ratio of active shards in the cluster expressed as a percentage. */ + active_shards_percent_as_number: double + /** The name of the cluster. */ + cluster_name: Name + /** The number of shards whose allocation has been delayed by the timeout settings. */ + delayed_unassigned_shards: integer + indices?: Record + /** The number of shards that are under initialization. */ + initializing_shards: integer + /** The number of nodes that are dedicated data nodes. */ + number_of_data_nodes: integer + /** The number of unfinished fetches. */ + number_of_in_flight_fetch: integer + /** The number of nodes within the cluster. */ + number_of_nodes: integer + /** The number of cluster-level changes that have not yet been executed. */ + number_of_pending_tasks: integer + /** The number of shards that are under relocation. */ + relocating_shards: integer + status: HealthStatus + /** The time since the earliest initiated task is waiting for being performed. */ + task_max_waiting_in_queue?: Duration + /** The time expressed in milliseconds since the earliest initiated task is waiting for being performed. */ + task_max_waiting_in_queue_millis: DurationValue + /** If `false`, the response was returned within the period of time specified by the `timeout` parameter (`30s` by default). */ + timed_out: boolean + /** The number of primary shards that are not allocated. */ + unassigned_primary_shards: integer + /** The number of shards that are not allocated.
*/ + unassigned_shards: integer +} + +export interface ClusterHealthIndexHealthStats { + active_primary_shards: integer + active_shards: integer + initializing_shards: integer + number_of_replicas: integer + number_of_shards: integer + relocating_shards: integer + shards?: Record + status: HealthStatus + unassigned_shards: integer + unassigned_primary_shards: integer +} + +export interface ClusterHealthRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. */ + index?: Indices + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ + expand_wildcards?: ExpandWildcards + /** Can be one of cluster, indices or shards. Controls the details level of the health information returned. */ + level?: Level + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ + local?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** A number controlling how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait. */ + wait_for_active_shards?: WaitForActiveShards + /** Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. */ + wait_for_events?: WaitForEvents + /** The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation. */ + wait_for_nodes?: ClusterHealthWaitForNodes + /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard initializations. Defaults to false, which means it will not wait for initializing shards. */ + wait_for_no_initializing_shards?: boolean + /** A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard relocations. Defaults to false, which means it will not wait for relocating shards. */ + wait_for_no_relocating_shards?: boolean + /** One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. */ + wait_for_status?: HealthStatus + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, expand_wildcards?: never, level?: never, local?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, wait_for_events?: never, wait_for_nodes?: never, wait_for_no_initializing_shards?: never, wait_for_no_relocating_shards?: never, wait_for_status?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, expand_wildcards?: never, level?: never, local?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, wait_for_events?: never, wait_for_nodes?: never, wait_for_no_initializing_shards?: never, wait_for_no_relocating_shards?: never, wait_for_status?: never } +} + +export type ClusterHealthResponse = ClusterHealthHealthResponseBody + +export interface ClusterHealthShardHealthStats { + active_shards: integer + initializing_shards: integer + primary_active: boolean + relocating_shards: integer + status: HealthStatus + unassigned_shards: integer + unassigned_primary_shards: integer +} + +export type ClusterHealthWaitForNodes = string | integer + +export interface ClusterInfoRequest extends RequestBase { + /** Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. */ + target: ClusterInfoTargets + /** All values in `body` will be added to the request body.
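+ *
+ * @example
+ * A sketch of the `ClusterHealthRequest` defined above (assumes a configured
+ * `client`; the status and timeout values are illustrative):
+ * ```ts
+ * const health = await client.cluster.health({
+ *   wait_for_status: 'yellow', // wait until the cluster is at least yellow
+ *   timeout: '30s',
+ * })
+ * console.log(health.status, health.unassigned_shards, health.timed_out)
+ * ```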
*/ + body?: string | { [key: string]: any } & { target?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { target?: never } +} + +export interface ClusterInfoResponse { + cluster_name: Name + http?: NodesHttp + ingest?: NodesIngest + thread_pool?: Record + script?: NodesScripting +} + +export interface ClusterPendingTasksPendingTask { + /** Indicates whether the pending tasks are currently executing or not. */ + executing: boolean + /** The number that represents when the task has been inserted into the task queue. */ + insert_order: integer + /** The priority of the pending task. + * The valid priorities in descending priority order are: `IMMEDIATE` > `URGENT` > `HIGH` > `NORMAL` > `LOW` > `LANGUID`. */ + priority: string + /** A general description of the cluster task that may include a reason and origin. */ + source: string + /** The time since the task is waiting for being performed. */ + time_in_queue?: Duration + /** The time expressed in milliseconds since the task is waiting for being performed. */ + time_in_queue_millis: DurationValue +} + +export interface ClusterPendingTasksRequest extends RequestBase { + /** If `true`, the request retrieves information from the local node only. + * If `false`, information is retrieved from the master node. */ + local?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { local?: never, master_timeout?: never } +} + +export interface ClusterPendingTasksResponse { + tasks: ClusterPendingTasksPendingTask[] +} + +export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { + /** A comma-separated list of the names of the nodes to exclude from the + * voting configuration. If specified, you may not also specify node_ids. */ + node_names?: Names + /** A comma-separated list of the persistent ids of the nodes to exclude + * from the voting configuration. If specified, you may not also specify node_names. */ + node_ids?: Ids + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** When adding a voting configuration exclusion, the API waits for the + * specified nodes to be excluded from the voting configuration before + * returning. If the timeout expires before the appropriate condition + * is satisfied, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_names?: never, node_ids?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_names?: never, node_ids?: never, master_timeout?: never, timeout?: never } +} + +export type ClusterPostVotingConfigExclusionsResponse = boolean + +export interface ClusterPutComponentTemplateRequest extends RequestBase { + /** Name of the component template to create. 
+ * Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mappings`; `synthetics-settings`. + * Elastic Agent uses these templates to configure backing indices for its data streams. + * If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. + * If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. */ + name: Name + /** If `true`, this request cannot replace or update existing component templates. */ + create?: boolean + /** User-defined reason for creating the component template. */ + cause?: string + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The template to be applied, which includes mappings, settings, or aliases configuration. */ + template: IndicesIndexState + /** Version number used to manage component templates externally. + * This number isn't automatically generated or incremented by Elasticsearch. + * To unset a version, replace the template without specifying a version. */ + version?: VersionNumber + /** Optional user metadata about the component template. + * It may have any contents. This map is not automatically generated by Elasticsearch. + * This information is stored in the cluster state, so keeping it short is preferable. + * To unset `_meta`, replace the template without specifying this information. */ + _meta?: Metadata + /** Marks this component template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ + deprecated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } +} + +export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase + +export interface ClusterPutSettingsRequest extends RequestBase { + /** Return settings in flat format (default: false) */ + flat_settings?: boolean + /** Explicit operation timeout for connection to master node */ + master_timeout?: Duration + /** Explicit operation timeout */ + timeout?: Duration + /** The settings that persist after the cluster restarts. */ + persistent?: Record<string, any> + /** The settings that do not persist after the cluster restarts. */ + transient?: Record<string, any> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { flat_settings?: never, master_timeout?: never, timeout?: never, persistent?: never, transient?: never } + /** All values in `querystring` will be added to the request querystring.
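+ *
+ * A minimal sketch of the two write APIs defined above (assumes a configured `Client` named `client`; the template content and setting value are illustrative):
+ * @example
+ * await client.cluster.putComponentTemplate({
+ *   name: 'my-mappings',
+ *   template: { mappings: { properties: { '@timestamp': { type: 'date' } } } }
+ * })
+ * await client.cluster.putSettings({
+ *   persistent: { 'indices.recovery.max_bytes_per_sec': '50mb' }
+ * })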
*/ + querystring?: { [key: string]: any } & { flat_settings?: never, master_timeout?: never, timeout?: never, persistent?: never, transient?: never } +} + +export interface ClusterPutSettingsResponse { + acknowledged: boolean + persistent: Record<string, any> + transient: Record<string, any> +} + +export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo + +export interface ClusterRemoteInfoClusterRemoteProxyInfo { + /** The connection mode for the remote cluster. */ + mode: 'proxy' + /** If it is `true`, there is at least one open connection to the remote cluster. + * If it is `false`, it means that the cluster no longer has an open connection to the remote cluster. + * It does not necessarily mean that the remote cluster is down or unavailable, just that at some point a connection was lost. */ + connected: boolean + /** The initial connect timeout for remote cluster connections. */ + initial_connect_timeout: Duration + /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. */ + skip_unavailable: boolean + /** The address for remote connections when proxy mode is configured. */ + proxy_address: string + server_name: string + /** The number of open socket connections to the remote cluster when proxy mode is configured. */ + num_proxy_sockets_connected: integer + /** The maximum number of socket connections to the remote cluster when proxy mode is configured. */ + max_proxy_socket_connections: integer + /** This field is present and has a value of `::es_redacted::` only when the remote cluster is configured with the API key based model. Otherwise, the field is not present. */ + cluster_credentials?: string +} + +export interface ClusterRemoteInfoClusterRemoteSniffInfo { + /** The connection mode for the remote cluster. */ + mode: 'sniff' + /** If it is `true`, there is at least one open connection to the remote cluster. + * If it is `false`, it means that the cluster no longer has an open connection to the remote cluster. + * It does not necessarily mean that the remote cluster is down or unavailable, just that at some point a connection was lost. */ + connected: boolean + /** The maximum number of connections maintained for the remote cluster when sniff mode is configured. */ + max_connections_per_cluster: integer + /** The number of connected nodes in the remote cluster when sniff mode is configured. */ + num_nodes_connected: long + /** The initial connect timeout for remote cluster connections. */ + initial_connect_timeout: Duration + /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. */ + skip_unavailable: boolean + /** The initial seed transport addresses of the remote cluster when sniff mode is configured. */ + seeds: string[] +} + +export interface ClusterRemoteInfoRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export type ClusterRemoteInfoResponse = Record<string, ClusterRemoteInfoClusterRemoteInfo> + +export interface ClusterRerouteCommand { + /** Cancel allocation of a shard (or recovery). Accepts index and shard for index name and shard number, and node for the node to cancel the shard allocation on.
This can be used to force resynchronization of existing replicas from the primary shard by cancelling them and allowing them to be reinitialized through the standard recovery process. By default only replica shard allocations can be cancelled. If it is necessary to cancel the allocation of a primary shard then the allow_primary flag must also be included in the request. */ + cancel?: ClusterRerouteCommandCancelAction + /** Move a started shard from one node to another node. Accepts index and shard for index name and shard number, from_node for the node to move the shard from, and to_node for the node to move the shard to. */ + move?: ClusterRerouteCommandMoveAction + /** Allocate an unassigned replica shard to a node. Accepts index and shard for index name and shard number, and node to allocate the shard to. Takes allocation deciders into account. */ + allocate_replica?: ClusterRerouteCommandAllocateReplicaAction + /** Allocate a primary shard to a node that holds a stale copy. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command may lead to data loss for the provided shard id. If a node which has the good copy of the data rejoins the cluster later on, that data will be deleted or overwritten with the data of the stale copy that was forcefully allocated with this command. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ + allocate_stale_primary?: ClusterRerouteCommandAllocatePrimaryAction + /** Allocate an empty primary shard to a node. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command leads to a complete loss of all data that was indexed into this shard, if it was previously started. If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ + allocate_empty_primary?: ClusterRerouteCommandAllocatePrimaryAction +} + +export interface ClusterRerouteCommandAllocatePrimaryAction { + index: IndexName + shard: integer + node: string + /** If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true */ + accept_data_loss: boolean +} + +export interface ClusterRerouteCommandAllocateReplicaAction { + index: IndexName + shard: integer + node: string +} + +export interface ClusterRerouteCommandCancelAction { + index: IndexName + shard: integer + node: string + allow_primary?: boolean +} + +export interface ClusterRerouteCommandMoveAction { + index: IndexName + shard: integer + /** The node to move the shard from */ + from_node: string + /** The node to move the shard to */ + to_node: string +} + +export interface ClusterRerouteRequest extends RequestBase { + /** If true, then the request simulates the operation. + * It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. */ + dry_run?: boolean + /** If true, then the response contains an explanation of why the commands can or cannot run. */ + explain?: boolean + /** Limits the information returned to the specified metrics. 
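+ *
+ * A dry-run sketch of the reroute commands described above (assumes a configured `Client` named `client`; index and node names are placeholders):
+ * @example
+ * const resp = await client.cluster.reroute({
+ *   dry_run: true,
+ *   commands: [{ move: { index: 'my-index', shard: 0, from_node: 'node-1', to_node: 'node-2' } }]
+ * })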
*/ + metric?: string | string[] + /** If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. */ + retry_failed?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** Defines the commands to perform. */ + commands?: ClusterRerouteCommand[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { dry_run?: never, explain?: never, metric?: never, retry_failed?: never, master_timeout?: never, timeout?: never, commands?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { dry_run?: never, explain?: never, metric?: never, retry_failed?: never, master_timeout?: never, timeout?: never, commands?: never } +} + +export interface ClusterRerouteRerouteDecision { + decider: string + decision: string + explanation: string +} + +export interface ClusterRerouteRerouteExplanation { + command: string + decisions: ClusterRerouteRerouteDecision[] + parameters: ClusterRerouteRerouteParameters +} + +export interface ClusterRerouteRerouteParameters { + allow_primary: boolean + index: IndexName + node: NodeName + shard: integer + from_node?: NodeName + to_node?: NodeName +} + +export interface ClusterRerouteResponse { + acknowledged: boolean + explanations?: ClusterRerouteRerouteExplanation[] + /** There aren't any guarantees on the output/structure of the raw cluster state. + * Here you will find the internal representation of the cluster, which can + * differ from the external representation. */ + state?: any +} + +export type ClusterStateClusterStateMetric = '_all' | 'version' | 'master_node' | 'blocks' | 'nodes' | 'metadata' | 'routing_table' | 'routing_nodes' | 'customs' + +export type ClusterStateClusterStateMetrics = ClusterStateClusterStateMetric | ClusterStateClusterStateMetric[] + +export interface ClusterStateRequest extends RequestBase { + /** Limit the information returned to the specified metrics */ + metric?: ClusterStateClusterStateMetrics + /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ + index?: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ + expand_wildcards?: ExpandWildcards + /** Return settings in flat format (default: false) */ + flat_settings?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ + ignore_unavailable?: boolean + /** Return local information, do not retrieve the state from master node (default: false) */ + local?: boolean + /** Timeout for waiting for new cluster state in case it is blocked */ + master_timeout?: Duration + /** Wait for the metadata version to be equal or greater than the specified metadata version */ + wait_for_metadata_version?: VersionNumber + /** The maximum time to wait for wait_for_metadata_version before timing out */ + wait_for_timeout?: Duration + /** All values in `body` will be added to the request body. 
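+ *
+ * For example, fetching only cluster metadata for one index (a sketch; assumes a configured `Client` named `client`):
+ * @example
+ * const state = await client.cluster.state({ metric: 'metadata', index: 'my-index' })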
*/ + body?: string | { [key: string]: any } & { metric?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, wait_for_metadata_version?: never, wait_for_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { metric?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, local?: never, master_timeout?: never, wait_for_metadata_version?: never, wait_for_timeout?: never } +} + +export type ClusterStateResponse = any + +export interface ClusterStatsCCSStats { + /** Contains remote cluster settings and metrics collected from them. + * The keys are cluster names, and the values are per-cluster data. + * Only present if `include_remotes` option is set to true. */ + clusters?: Record<string, ClusterStatsRemoteClusterInfo> + /** Information about cross-cluster search usage. */ + _search: ClusterStatsCCSUsageStats + /** Information about ES|QL cross-cluster query usage. */ + _esql?: ClusterStatsCCSUsageStats +} + +export interface ClusterStatsCCSUsageClusterStats { + /** The total number of successful (not skipped) cross-cluster search requests that were executed against this cluster. This may include requests where partial results were returned, but not requests in which the cluster has been skipped entirely. */ + total: integer + /** The total number of cross-cluster search requests for which this cluster was skipped. */ + skipped: integer + /** Statistics about the time taken to execute requests against this cluster. */ + took: ClusterStatsCCSUsageTimeValue +} + +export interface ClusterStatsCCSUsageStats { + /** The total number of cross-cluster search requests that have been executed by the cluster. */ + total: integer + /** The total number of cross-cluster search requests that have been successfully executed by the cluster. */ + success: integer + /** The total number of cross-cluster search requests (successful or failed) that had at least one remote cluster skipped. */ + skipped: integer + /** Statistics about the time taken to execute cross-cluster search requests. */ + took: ClusterStatsCCSUsageTimeValue + /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `true`. */ + took_mrt_true?: ClusterStatsCCSUsageTimeValue + /** Statistics about the time taken to execute cross-cluster search requests for which the `ccs_minimize_roundtrips` setting was set to `false`. */ + took_mrt_false?: ClusterStatsCCSUsageTimeValue + /** The maximum number of remote clusters that were queried in a single cross-cluster search request. */ + remotes_per_search_max: integer + /** The average number of remote clusters that were queried in a single cross-cluster search request. */ + remotes_per_search_avg: double + /** Statistics about the reasons for cross-cluster search request failures. The keys are the failure reason names and the values are the number of requests that failed for that reason. */ + failure_reasons: Record<string, integer> + /** The keys are the names of the search feature, and the values are the number of requests that used that feature. A single request can use more than one feature (e.g. both `async` and `wildcard`). */ + features: Record<string, integer> + /** Statistics about the clients that executed cross-cluster search requests.
The keys are the names of the clients, and the values are the number of requests that were executed by that client. Only known clients (such as `kibana` or `elasticsearch`) are counted. */ + clients: Record<string, integer> + /** Statistics about the clusters that were queried in cross-cluster search requests. The keys are cluster names, and the values are per-cluster telemetry data. This also includes the local cluster itself, which uses the name `(local)`. */ + clusters: Record<string, ClusterStatsCCSUsageClusterStats> +} + +export interface ClusterStatsCCSUsageTimeValue { + /** The maximum time taken to execute a request, in milliseconds. */ + max: DurationValue + /** The average time taken to execute a request, in milliseconds. */ + avg: DurationValue + /** The 90th percentile of the time taken to execute requests, in milliseconds. */ + p90: DurationValue +} + +export interface ClusterStatsCharFilterTypes { + /** Contains statistics about analyzer types used in selected nodes. */ + analyzer_types: ClusterStatsFieldTypes[] + /** Contains statistics about built-in analyzers used in selected nodes. */ + built_in_analyzers: ClusterStatsFieldTypes[] + /** Contains statistics about built-in character filters used in selected nodes. */ + built_in_char_filters: ClusterStatsFieldTypes[] + /** Contains statistics about built-in token filters used in selected nodes. */ + built_in_filters: ClusterStatsFieldTypes[] + /** Contains statistics about built-in tokenizers used in selected nodes. */ + built_in_tokenizers: ClusterStatsFieldTypes[] + /** Contains statistics about character filter types used in selected nodes. */ + char_filter_types: ClusterStatsFieldTypes[] + /** Contains statistics about token filter types used in selected nodes. */ + filter_types: ClusterStatsFieldTypes[] + /** Contains statistics about tokenizer types used in selected nodes. */ + tokenizer_types: ClusterStatsFieldTypes[] + /** Contains statistics about synonym types used in selected nodes. */ + synonyms: Record<string, ClusterStatsSynonymsStats> +} + +export interface ClusterStatsClusterFileSystem { + path?: string + mount?: string + type?: string + /** Total number of bytes available to JVM in file stores across all selected nodes. + * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_bytes`. + * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */ + available_in_bytes?: long + /** Total number of bytes available to JVM in file stores across all selected nodes. + * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_bytes`. + * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */ + available?: ByteSize + /** Total number, in bytes, of unallocated bytes in file stores across all selected nodes. */ + free_in_bytes?: long + /** Total number of unallocated bytes in file stores across all selected nodes. */ + free?: ByteSize + /** Total size, in bytes, of all file stores across all selected nodes. */ + total_in_bytes?: long + /** Total size of all file stores across all selected nodes.
*/ + total?: ByteSize + low_watermark_free_space?: ByteSize + low_watermark_free_space_in_bytes?: long + high_watermark_free_space?: ByteSize + high_watermark_free_space_in_bytes?: long + flood_stage_free_space?: ByteSize + flood_stage_free_space_in_bytes?: long + frozen_flood_stage_free_space?: ByteSize + frozen_flood_stage_free_space_in_bytes?: long +} + +export interface ClusterStatsClusterIndices { + /** Contains statistics about analyzers and analyzer components used in selected nodes. */ + analysis?: ClusterStatsCharFilterTypes + /** Contains statistics about memory used for completion in selected nodes. */ + completion: CompletionStats + /** Total number of indices with shards assigned to selected nodes. */ + count: long + /** Contains counts for documents in selected nodes. */ + docs: DocStats + /** Contains statistics about the field data cache of selected nodes. */ + fielddata: FielddataStats + /** Contains statistics about the query cache of selected nodes. */ + query_cache: QueryCacheStats + /** Holds a snapshot of the search usage statistics. + * Used to hold the stats for a single node that's part of a ClusterStatsNodeResponse, as well as to + * accumulate stats for the entire cluster and return them as part of the ClusterStatsResponse. */ + search: ClusterStatsSearchUsageStats + /** Contains statistics about segments in selected nodes. */ + segments: SegmentsStats + /** Contains statistics about indices with shards assigned to selected nodes. */ + shards: ClusterStatsClusterIndicesShards + /** Contains statistics about the size of shards assigned to selected nodes. */ + store: StoreStats + /** Contains statistics about field mappings in selected nodes. */ + mappings?: ClusterStatsFieldTypesMappings + /** Contains statistics about index versions used in selected nodes. */ + versions?: ClusterStatsIndicesVersions[] + /** Contains statistics about indexed dense vector fields. */ + dense_vector: ClusterStatsDenseVectorStats + /** Contains statistics about indexed sparse vector fields. */ + sparse_vector: ClusterStatsSparseVectorStats +} + +export interface ClusterStatsClusterIndicesShards { + /** Contains statistics about shards assigned to selected nodes. */ + index?: ClusterStatsClusterIndicesShardsIndex + /** Number of primary shards assigned to selected nodes. */ + primaries?: double + /** Ratio of replica shards to primary shards across all selected nodes. */ + replication?: double + /** Total number of shards assigned to selected nodes. */ + total?: double +} + +export interface ClusterStatsClusterIndicesShardsIndex { + /** Contains statistics about the number of primary shards assigned to selected nodes. */ + primaries: ClusterStatsClusterShardMetrics + /** Contains statistics about the number of replication shards assigned to selected nodes. */ + replication: ClusterStatsClusterShardMetrics + /** Contains statistics about the number of shards assigned to selected nodes. */ + shards: ClusterStatsClusterShardMetrics +} + +export interface ClusterStatsClusterIngest { + number_of_pipelines: integer + processor_stats: Record<string, ClusterStatsClusterProcessor> +} + +export interface ClusterStatsClusterJvm { + /** Uptime duration, in milliseconds, since JVM last started. */ + max_uptime_in_millis: DurationValue + /** Uptime duration since JVM last started. */ + max_uptime?: Duration + /** Contains statistics about memory used by selected nodes. */ + mem: ClusterStatsClusterJvmMemory + /** Number of active threads in use by JVM across all selected nodes.
*/ + threads: long + /** Contains statistics about the JVM versions used by selected nodes. */ + versions: ClusterStatsClusterJvmVersion[] +} + +export interface ClusterStatsClusterJvmMemory { + /** Maximum amount of memory, in bytes, available for use by the heap across all selected nodes. */ + heap_max_in_bytes: long + /** Maximum amount of memory available for use by the heap across all selected nodes. */ + heap_max?: ByteSize + /** Memory, in bytes, currently in use by the heap across all selected nodes. */ + heap_used_in_bytes: long + /** Memory currently in use by the heap across all selected nodes. */ + heap_used?: ByteSize +} + +export interface ClusterStatsClusterJvmVersion { + /** Always `true`. All distributions come with a bundled Java Development Kit (JDK). */ + bundled_jdk: boolean + /** Total number of selected nodes using JVM. */ + count: integer + /** If `true`, a bundled JDK is in use by JVM. */ + using_bundled_jdk: boolean + /** Version of JVM used by one or more selected nodes. */ + version: VersionString + /** Name of the JVM. */ + vm_name: string + /** Vendor of the JVM. */ + vm_vendor: string + /** Full version number of JVM. + * The full version number includes a plus sign (+) followed by the build number. */ + vm_version: VersionString +} + +export interface ClusterStatsClusterNetworkTypes { + /** Contains statistics about the HTTP network types used by selected nodes. */ + http_types: Record + /** Contains statistics about the transport network types used by selected nodes. */ + transport_types: Record +} + +export interface ClusterStatsClusterNodeCount { + total: integer + coordinating_only?: integer + data?: integer + data_cold?: integer + data_content?: integer + data_frozen?: integer + data_hot?: integer + data_warm?: integer + index?: integer + ingest?: integer + master?: integer + ml?: integer + remote_cluster_client?: integer + search?: integer + transform?: integer + voting_only?: integer +} + +export interface ClusterStatsClusterNodes { + /** Contains counts for nodes selected by the request’s node filters. */ + count: ClusterStatsClusterNodeCount + /** Contains statistics about the discovery types used by selected nodes. */ + discovery_types: Record + /** Contains statistics about file stores by selected nodes. */ + fs: ClusterStatsClusterFileSystem + indexing_pressure: ClusterStatsIndexingPressure + ingest: ClusterStatsClusterIngest + /** Contains statistics about the Java Virtual Machines (JVMs) used by selected nodes. */ + jvm: ClusterStatsClusterJvm + /** Contains statistics about the transport and HTTP networks used by selected nodes. */ + network_types: ClusterStatsClusterNetworkTypes + /** Contains statistics about the operating systems used by selected nodes. */ + os: ClusterStatsClusterOperatingSystem + /** Contains statistics about Elasticsearch distributions installed on selected nodes. */ + packaging_types: ClusterStatsNodePackagingType[] + /** Contains statistics about installed plugins and modules by selected nodes. + * If no plugins or modules are installed, this array is empty. */ + plugins: PluginStats[] + /** Contains statistics about processes used by selected nodes. */ + process: ClusterStatsClusterProcess + /** Array of Elasticsearch versions used on selected nodes. */ + versions: VersionString[] +} + +export interface ClusterStatsClusterOperatingSystem { + /** Number of processors used to calculate thread pool size across all selected nodes. 
+ * This number can be set with the processors setting of a node and defaults to the number of processors reported by the operating system. + * In both cases, this number will never be larger than 32. */ + allocated_processors: integer + /** Contains statistics about processor architectures (for example, x86_64 or aarch64) used by selected nodes. */ + architectures?: ClusterStatsClusterOperatingSystemArchitecture[] + /** Number of processors available to JVM across all selected nodes. */ + available_processors: integer + /** Contains statistics about memory used by selected nodes. */ + mem: ClusterStatsOperatingSystemMemoryInfo + /** Contains statistics about operating systems used by selected nodes. */ + names: ClusterStatsClusterOperatingSystemName[] + /** Contains statistics about operating systems used by selected nodes. */ + pretty_names: ClusterStatsClusterOperatingSystemPrettyName[] +} + +export interface ClusterStatsClusterOperatingSystemArchitecture { + /** Name of an architecture used by one or more selected nodes. */ + arch: string + /** Number of selected nodes using the architecture. */ + count: integer +} + +export interface ClusterStatsClusterOperatingSystemName { + /** Number of selected nodes using the operating system. */ + count: integer + /** Name of an operating system used by one or more selected nodes. */ + name: Name +} + +export interface ClusterStatsClusterOperatingSystemPrettyName { + /** Number of selected nodes using the operating system. */ + count: integer + /** Human-readable name of an operating system used by one or more selected nodes. */ + pretty_name: Name +} + +export interface ClusterStatsClusterProcess { + /** Contains statistics about CPU used by selected nodes. */ + cpu: ClusterStatsClusterProcessCpu + /** Contains statistics about open file descriptors in selected nodes. */ + open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors +} + +export interface ClusterStatsClusterProcessCpu { + /** Percentage of CPU used across all selected nodes. + * Returns `-1` if not supported. */ + percent: integer +} + +export interface ClusterStatsClusterProcessOpenFileDescriptors { + /** Average number of concurrently open file descriptors. + * Returns `-1` if not supported. */ + avg: long + /** Maximum number of concurrently open file descriptors allowed across all selected nodes. + * Returns `-1` if not supported. */ + max: long + /** Minimum number of concurrently open file descriptors across all selected nodes. + * Returns -1 if not supported. */ + min: long +} + +export interface ClusterStatsClusterProcessor { + count: long + current: long + failed: long + time?: Duration + time_in_millis: DurationValue +} + +export interface ClusterStatsClusterShardMetrics { + /** Mean number of shards in an index, counting only shards assigned to selected nodes. */ + avg: double + /** Maximum number of shards in an index, counting only shards assigned to selected nodes. */ + max: double + /** Minimum number of shards in an index, counting only shards assigned to selected nodes. 
*/ + min: double +} + +export interface ClusterStatsClusterSnapshotStats { + current_counts: ClusterStatsSnapshotCurrentCounts + repositories: Record +} + +export interface ClusterStatsDenseVectorOffHeapStats { + total_size_bytes: long + total_size?: ByteSize + total_veb_size_bytes: long + total_veb_size?: ByteSize + total_vec_size_bytes: long + total_vec_size?: ByteSize + total_veq_size_bytes: long + total_veq_size?: ByteSize + total_vex_size_bytes: long + total_vex_size?: ByteSize + fielddata?: Record> +} + +export interface ClusterStatsDenseVectorStats { + value_count: long + off_heap?: ClusterStatsDenseVectorOffHeapStats +} + +export interface ClusterStatsExtendedRetrieversSearchUsage { + text_similarity_reranker?: ClusterStatsExtendedTextSimilarityRetrieverUsage +} + +export interface ClusterStatsExtendedSearchUsage { + retrievers?: ClusterStatsExtendedRetrieversSearchUsage +} + +export interface ClusterStatsExtendedTextSimilarityRetrieverUsage { + chunk_rescorer?: long +} + +export interface ClusterStatsFieldTypes { + /** The name for the field type in selected nodes. */ + name: Name + /** The number of occurrences of the field type in selected nodes. */ + count: integer + /** The number of indices containing the field type in selected nodes. */ + index_count: integer + /** For dense_vector field types, number of indexed vector types in selected nodes. */ + indexed_vector_count?: integer + /** For dense_vector field types, the maximum dimension of all indexed vector types in selected nodes. */ + indexed_vector_dim_max?: integer + /** For dense_vector field types, the minimum dimension of all indexed vector types in selected nodes. */ + indexed_vector_dim_min?: integer + /** The number of fields that declare a script. */ + script_count?: integer + /** For dense_vector field types, count of mappings by index type */ + vector_index_type_count?: Record + /** For dense_vector field types, count of mappings by similarity */ + vector_similarity_type_count?: Record + /** For dense_vector field types, count of mappings by element type */ + vector_element_type_count?: Record +} + +export interface ClusterStatsFieldTypesMappings { + /** Contains statistics about field data types used in selected nodes. */ + field_types: ClusterStatsFieldTypes[] + /** Contains statistics about runtime field data types used in selected nodes. */ + runtime_field_types: ClusterStatsRuntimeFieldTypes[] + /** Total number of fields in all non-system indices. */ + total_field_count?: long + /** Total number of fields in all non-system indices, accounting for mapping deduplication. */ + total_deduplicated_field_count?: long + /** Total size of all mappings after deduplication and compression. */ + total_deduplicated_mapping_size?: ByteSize + /** Total size of all mappings, in bytes, after deduplication and compression. */ + total_deduplicated_mapping_size_in_bytes?: long + /** Source mode usage count. */ + source_modes: Record +} + +export interface ClusterStatsIndexingPressure { + memory: NodesIndexingPressureMemory +} + +export interface ClusterStatsIndicesVersions { + index_count: integer + primary_shard_count: integer + total_primary_bytes: long + total_primary_size?: ByteSize + version: VersionString +} + +export interface ClusterStatsNodePackagingType { + /** Number of selected nodes using the distribution flavor and file type. */ + count: integer + /** Type of Elasticsearch distribution. This is always `default`. */ + flavor: string + /** File type (such as `tar` or `zip`) used for the distribution package. 
*/ + type: string +} + +export interface ClusterStatsOperatingSystemMemoryInfo { + /** Total amount, in bytes, of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ + adjusted_total_in_bytes?: long + /** Total amount of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ + adjusted_total?: ByteSize + /** Amount, in bytes, of free physical memory across all selected nodes. */ + free_in_bytes: long + /** Amount of free physical memory across all selected nodes. */ + free?: ByteSize + /** Percentage of free physical memory across all selected nodes. */ + free_percent: integer + /** Total amount, in bytes, of physical memory across all selected nodes. */ + total_in_bytes: long + /** Total amount of physical memory across all selected nodes. */ + total?: ByteSize + /** Amount, in bytes, of physical memory in use across all selected nodes. */ + used_in_bytes: long + /** Amount of physical memory in use across all selected nodes. */ + used?: ByteSize + /** Percentage of physical memory in use across all selected nodes. */ + used_percent: integer +} + +export interface ClusterStatsPerRepositoryStats { + type: string + oldest_start_time_millis: UnitMillis + oldest_start_time?: DateFormat + current_counts: ClusterStatsRepositoryStatsCurrentCounts +} + +export interface ClusterStatsRemoteClusterInfo { + /** The UUID of the remote cluster. */ + cluster_uuid: string + /** The connection mode used to communicate with the remote cluster. */ + mode: string + /** The `skip_unavailable` setting used for this remote cluster. */ + skip_unavailable: boolean + /** Transport compression setting used for this remote cluster. */ + 'transport.compress': string + /** Health status of the cluster, based on the state of its primary and replica shards. */ + status: HealthStatus + /** The list of Elasticsearch versions used by the nodes on the remote cluster. */ + version: VersionString[] + /** The total count of nodes in the remote cluster. */ + nodes_count: integer + /** The total number of shards in the remote cluster. */ + shards_count: integer + /** The total number of indices in the remote cluster. */ + indices_count: integer + /** Total data set size, in bytes, of all shards assigned to selected nodes. */ + indices_total_size_in_bytes: long + /** Total data set size of all shards assigned to selected nodes, as a human-readable string. */ + indices_total_size?: string + /** Maximum amount of memory, in bytes, available for use by the heap across the nodes of the remote cluster. */ + max_heap_in_bytes: long + /** Maximum amount of memory available for use by the heap across the nodes of the remote cluster, as a human-readable string. */ + max_heap?: string + /** Total amount, in bytes, of physical memory across the nodes of the remote cluster. */ + mem_total_in_bytes: long + /** Total amount of physical memory across the nodes of the remote cluster, as a human-readable string. 
*/ + mem_total?: string +} + +export interface ClusterStatsRepositoryStatsCurrentCounts { + snapshots: integer + clones: integer + finalizations: integer + deletions: integer + snapshot_deletions: integer + active_deletions: integer + shards: ClusterStatsRepositoryStatsShards +} + +export interface ClusterStatsRepositoryStatsShards { + total: integer + complete: integer + incomplete: integer + states: Record +} + +export interface ClusterStatsRequest extends RequestBase { + /** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ + node_id?: NodeIds + /** Include remote cluster data into the response */ + include_remotes?: boolean + /** Period to wait for each node to respond. + * If a node does not respond before its timeout expires, the response does not include its stats. + * However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, include_remotes?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, include_remotes?: never, timeout?: never } +} + +export type ClusterStatsResponse = ClusterStatsStatsResponseBase + +export interface ClusterStatsRuntimeFieldTypes { + /** Maximum number of characters for a single runtime field script. */ + chars_max: integer + /** Total number of characters for the scripts that define the current runtime field data type. */ + chars_total: integer + /** Number of runtime fields mapped to the field data type in selected nodes. */ + count: integer + /** Maximum number of accesses to doc_values for a single runtime field script */ + doc_max: integer + /** Total number of accesses to doc_values for the scripts that define the current runtime field data type. */ + doc_total: integer + /** Number of indices containing a mapping of the runtime field data type in selected nodes. */ + index_count: integer + /** Script languages used for the runtime fields scripts. */ + lang: string[] + /** Maximum number of lines for a single runtime field script. */ + lines_max: integer + /** Total number of lines for the scripts that define the current runtime field data type. */ + lines_total: integer + /** Field data type used in selected nodes. */ + name: Name + /** Number of runtime fields that don’t declare a script. */ + scriptless_count: integer + /** Number of runtime fields that shadow an indexed field. */ + shadowed_count: integer + /** Maximum number of accesses to _source for a single runtime field script. */ + source_max: integer + /** Total number of accesses to _source for the scripts that define the current runtime field data type. 
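+ *
+ * The statistics modeled in this section are returned by the cluster stats API; a request sketch using `ClusterStatsRequest` above (assumes a configured `Client` named `client`):
+ * @example
+ * const stats = await client.cluster.stats({ include_remotes: true })
+ * console.log(stats.indices.count, stats.nodes.count.total)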
*/ + source_total: integer +} + +export interface ClusterStatsSearchUsageStats { + total: long + queries: Record<string, long> + rescorers: Record<string, long> + sections: Record<string, long> + retrievers: Record<string, long> + extended: ClusterStatsExtendedSearchUsage +} + +export type ClusterStatsShardState = 'INIT' | 'SUCCESS' | 'FAILED' | 'ABORTED' | 'MISSING' | 'WAITING' | 'QUEUED' | 'PAUSED_FOR_NODE_REMOVAL' + +export interface ClusterStatsSnapshotCurrentCounts { + /** Snapshots currently in progress */ + snapshots: integer + /** Incomplete shard snapshots */ + shard_snapshots: integer + /** Snapshot deletions in progress */ + snapshot_deletions: integer + /** Sum of snapshots and snapshot_deletions */ + concurrent_operations: integer + /** Cleanups in progress, not counted in concurrent_operations as they are not concurrent */ + cleanups: integer +} + +export interface ClusterStatsSparseVectorStats { + value_count: long +} + +export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { + /** Name of the cluster, based on the cluster name setting. */ + cluster_name: Name + /** Unique identifier for the cluster. */ + cluster_uuid: Uuid + /** Contains statistics about indices with shards assigned to selected nodes. */ + indices: ClusterStatsClusterIndices + /** Contains statistics about nodes selected by the request’s node filters. */ + nodes: ClusterStatsClusterNodes + /** Contains stats on repository feature usage exposed in cluster stats for telemetry. */ + repositories: Record<string, Record<string, long>> + /** Contains stats on cluster snapshots. */ + snapshots: ClusterStatsClusterSnapshotStats + /** Health status of the cluster, based on the state of its primary and replica shards. */ + status?: HealthStatus + /** Unix timestamp, in milliseconds, for the last time the cluster statistics were refreshed.
*/ + timestamp: long + /** Cross-cluster stats */ + ccs: ClusterStatsCCSStats +} + +export interface ClusterStatsSynonymsStats { + count: integer + index_count: integer +} + +export interface ConnectorConnector { + api_key_id?: string + api_key_secret_id?: string + configuration: ConnectorConnectorConfiguration + custom_scheduling: ConnectorConnectorCustomScheduling + deleted: boolean + description?: string + error?: string | null + features?: ConnectorConnectorFeatures + filtering: ConnectorFilteringConfig[] + id?: Id + index_name?: IndexName | null + is_native: boolean + language?: string + last_access_control_sync_error?: string + last_access_control_sync_scheduled_at?: DateTime + last_access_control_sync_status?: ConnectorSyncStatus + last_deleted_document_count?: long + last_incremental_sync_scheduled_at?: DateTime + last_indexed_document_count?: long + last_seen?: DateTime + last_sync_error?: string + last_sync_scheduled_at?: DateTime + last_sync_status?: ConnectorSyncStatus + last_synced?: DateTime + name?: string + pipeline?: ConnectorIngestPipelineParams + scheduling: ConnectorSchedulingConfiguration + service_type?: string + status: ConnectorConnectorStatus + sync_cursor?: any + sync_now: boolean +} + +export interface ConnectorConnectorConfigProperties { + category?: string + default_value: ScalarValue + depends_on: ConnectorDependency[] + display: ConnectorDisplayType + label: string + options: ConnectorSelectOption[] + order?: integer + placeholder?: string + required: boolean + sensitive: boolean + tooltip?: string | null + type?: ConnectorConnectorFieldType + ui_restrictions?: string[] + validations?: ConnectorValidation[] + value: any +} + +export type ConnectorConnectorConfiguration = Record<string, ConnectorConnectorConfigProperties> + +export type ConnectorConnectorCustomScheduling = Record<string, ConnectorCustomScheduling> + +export interface ConnectorConnectorFeatures { + /** Indicates whether document-level security is enabled. */ + document_level_security?: ConnectorFeatureEnabled + /** Indicates whether incremental syncs are enabled. */ + incremental_sync?: ConnectorFeatureEnabled + /** Indicates whether managed connector API keys are enabled.
*/ + native_connector_api_keys?: ConnectorFeatureEnabled + sync_rules?: ConnectorSyncRulesFeature +} + +export type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool' + +export interface ConnectorConnectorScheduling { + enabled: boolean + /** The interval is expressed using the crontab syntax */ + interval: string +} + +export type ConnectorConnectorStatus = 'created' | 'needs_configuration' | 'configured' | 'connected' | 'error' + +export interface ConnectorConnectorSyncJob { + cancelation_requested_at?: DateTime + canceled_at?: DateTime + completed_at?: DateTime + connector: ConnectorSyncJobConnectorReference + created_at: DateTime + deleted_document_count: long + error?: string + id: Id + indexed_document_count: long + indexed_document_volume: long + job_type: ConnectorSyncJobType + last_seen?: DateTime + metadata: Record + started_at?: DateTime + status: ConnectorSyncStatus + total_document_count: long + trigger_method: ConnectorSyncJobTriggerMethod + worker_hostname?: string +} + +export interface ConnectorCustomScheduling { + configuration_overrides: ConnectorCustomSchedulingConfigurationOverrides + enabled: boolean + interval: string + last_synced?: DateTime + name: string +} + +export interface ConnectorCustomSchedulingConfigurationOverrides { + max_crawl_depth?: integer + sitemap_discovery_disabled?: boolean + domain_allowlist?: string[] + sitemap_urls?: string[] + seed_urls?: string[] +} + +export interface ConnectorDependency { + field: string + value: ScalarValue +} + +export type ConnectorDisplayType = 'textbox' | 'textarea' | 'numeric' | 'toggle' | 'dropdown' + +export interface ConnectorFeatureEnabled { + enabled: boolean +} + +export interface ConnectorFilteringAdvancedSnippet { + created_at?: DateTime + updated_at?: DateTime + value: any +} + +export interface ConnectorFilteringConfig { + active: ConnectorFilteringRules + domain?: string + draft: ConnectorFilteringRules +} + +export type ConnectorFilteringPolicy = 'exclude' | 'include' + +export interface ConnectorFilteringRule { + created_at?: DateTime + field: Field + id: Id + order: integer + policy: ConnectorFilteringPolicy + rule: ConnectorFilteringRuleRule + updated_at?: DateTime + value: string +} + +export type ConnectorFilteringRuleRule = 'contains' | 'ends_with' | 'equals' | 'regex' | 'starts_with' | '>' | '<' + +export interface ConnectorFilteringRules { + advanced_snippet: ConnectorFilteringAdvancedSnippet + rules: ConnectorFilteringRule[] + validation: ConnectorFilteringRulesValidation +} + +export interface ConnectorFilteringRulesValidation { + errors: ConnectorFilteringValidation[] + state: ConnectorFilteringValidationState +} + +export interface ConnectorFilteringValidation { + ids: Id[] + messages: string[] +} + +export type ConnectorFilteringValidationState = 'edited' | 'invalid' | 'valid' + +export interface ConnectorGreaterThanValidation { + type: 'greater_than' + constraint: double +} + +export interface ConnectorIncludedInValidation { + type: 'included_in' + constraint: ScalarValue[] +} + +export interface ConnectorIngestPipelineParams { + extract_binary_content: boolean + name: string + reduce_whitespace: boolean + run_ml_inference: boolean +} + +export interface ConnectorLessThanValidation { + type: 'less_than' + constraint: double +} + +export interface ConnectorListTypeValidation { + type: 'list_type' + constraint: string +} + +export interface ConnectorRegexValidation { + type: 'regex' + constraint: string +} + +export interface ConnectorSchedulingConfiguration { + access_control?: 
ConnectorConnectorScheduling + full?: ConnectorConnectorScheduling + incremental?: ConnectorConnectorScheduling +} + +export interface ConnectorSelectOption { + label: string + value: ScalarValue +} + +export interface ConnectorSyncJobConnectorReference { + configuration: ConnectorConnectorConfiguration + filtering: ConnectorFilteringRules + id: Id + index_name: string + language?: string + pipeline?: ConnectorIngestPipelineParams + service_type: string + sync_cursor?: any +} + +export type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled' + +export type ConnectorSyncJobType = 'full' | 'incremental' | 'access_control' + +export interface ConnectorSyncRulesFeature { + /** Indicates whether advanced sync rules are enabled. */ + advanced?: ConnectorFeatureEnabled + /** Indicates whether basic sync rules are enabled. */ + basic?: ConnectorFeatureEnabled +} + +export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'error' | 'in_progress' | 'pending' | 'suspended' + +export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation + +export interface ConnectorCheckInRequest extends RequestBase { + /** The unique identifier of the connector to be checked in */ + connector_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never } +} + +export interface ConnectorCheckInResponse { + result: Result +} + +export interface ConnectorDeleteRequest extends RequestBase { + /** The unique identifier of the connector to be deleted */ + connector_id: Id + /** A flag indicating whether associated sync jobs should also be removed. */ + delete_sync_jobs?: boolean + /** A flag indicating whether the connector should be hard deleted. */ + hard?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, delete_sync_jobs?: never, hard?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, delete_sync_jobs?: never, hard?: never } +} + +export type ConnectorDeleteResponse = AcknowledgedResponseBase + +export interface ConnectorGetRequest extends RequestBase { + /** The unique identifier of the connector */ + connector_id: Id + /** A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. */ + include_deleted?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, include_deleted?: never } + /** All values in `querystring` will be added to the request querystring.
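+ *
+ * A fetch-and-delete sketch for the connector APIs above (assumes a configured `Client` named `client`; the connector id is a placeholder):
+ * @example
+ * const connector = await client.connector.get({ connector_id: 'my-connector' })
+ * await client.connector.delete({ connector_id: 'my-connector', delete_sync_jobs: true })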
*/ + querystring?: { [key: string]: any } & { connector_id?: never, include_deleted?: never } +} + +export type ConnectorGetResponse = ConnectorConnector + +export interface ConnectorLastSyncRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + last_access_control_sync_error?: string + last_access_control_sync_scheduled_at?: DateTime + last_access_control_sync_status?: ConnectorSyncStatus + last_deleted_document_count?: long + last_incremental_sync_scheduled_at?: DateTime + last_indexed_document_count?: long + last_seen?: DateTime + last_sync_error?: string + last_sync_scheduled_at?: DateTime + last_sync_status?: ConnectorSyncStatus + last_synced?: DateTime + sync_cursor?: any + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, last_access_control_sync_error?: never, last_access_control_sync_scheduled_at?: never, last_access_control_sync_status?: never, last_deleted_document_count?: never, last_incremental_sync_scheduled_at?: never, last_indexed_document_count?: never, last_seen?: never, last_sync_error?: never, last_sync_scheduled_at?: never, last_sync_status?: never, last_synced?: never, sync_cursor?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, last_access_control_sync_error?: never, last_access_control_sync_scheduled_at?: never, last_access_control_sync_status?: never, last_deleted_document_count?: never, last_incremental_sync_scheduled_at?: never, last_indexed_document_count?: never, last_seen?: never, last_sync_error?: never, last_sync_scheduled_at?: never, last_sync_status?: never, last_synced?: never, sync_cursor?: never } +} + +export interface ConnectorLastSyncResponse { + result: Result +} + +export interface ConnectorListRequest extends RequestBase { + /** Starting offset */ + from?: integer + /** Specifies a max number of results to get */ + size?: integer + /** A comma-separated list of connector index names to fetch connector documents for */ + index_name?: Indices + /** A comma-separated list of connector names to fetch connector documents for */ + connector_name?: Names + /** A comma-separated list of connector service types to fetch connector documents for */ + service_type?: Names + /** A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. */ + include_deleted?: boolean + /** A wildcard query string that filters connectors with matching name, description or index name */ + query?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never, index_name?: never, connector_name?: never, service_type?: never, include_deleted?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never, index_name?: never, connector_name?: never, service_type?: never, include_deleted?: never, query?: never } +} + +export interface ConnectorListResponse { + count: long + results: ConnectorConnector[] +} + +export interface ConnectorPostRequest extends RequestBase { + description?: string + index_name?: IndexName + is_native?: boolean + language?: string + name?: string + service_type?: string + /** All values in `body` will be added to the request body. 
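+ *
+ * A creation sketch for `ConnectorPostRequest` above (assumes a configured `Client` named `client`; all field values are placeholders):
+ * @example
+ * const created = await client.connector.post({
+ *   index_name: 'search-my-data',
+ *   name: 'My connector',
+ *   service_type: 'google_drive'
+ * })
+ * console.log(created.id)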
*/ + body?: string | { [key: string]: any } & { description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never } +} + +export interface ConnectorPostResponse { + result: Result + id: Id +} + +export interface ConnectorPutRequest extends RequestBase { + /** The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. */ + connector_id?: Id + description?: string + index_name?: IndexName + is_native?: boolean + language?: string + name?: string + service_type?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, description?: never, index_name?: never, is_native?: never, language?: never, name?: never, service_type?: never } +} + +export interface ConnectorPutResponse { + result: Result + id: Id +} + +export interface ConnectorSyncJobCancelRequest extends RequestBase { + /** The unique identifier of the connector sync job */ + connector_sync_job_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never } +} + +export interface ConnectorSyncJobCancelResponse { + result: Result +} + +export interface ConnectorSyncJobCheckInRequest extends RequestBase { + /** The unique identifier of the connector sync job to be checked in. */ + connector_sync_job_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never } +} + +export interface ConnectorSyncJobCheckInResponse { +} + +export interface ConnectorSyncJobClaimRequest extends RequestBase { + /** The unique identifier of the connector sync job. */ + connector_sync_job_id: Id + /** The cursor object from the last incremental sync job. + * This should reference the `sync_cursor` field in the connector state for which the job runs. */ + sync_cursor?: any + /** The host name of the current system that will run the job. */ + worker_hostname: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never, sync_cursor?: never, worker_hostname?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never, sync_cursor?: never, worker_hostname?: never } +} + +export interface ConnectorSyncJobClaimResponse { +} + +export interface ConnectorSyncJobDeleteRequest extends RequestBase { + /** The unique identifier of the connector sync job to be deleted */ + connector_sync_job_id: Id + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never } +} + +export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase + +export interface ConnectorSyncJobErrorRequest extends RequestBase { + /** The unique identifier for the connector sync job. */ + connector_sync_job_id: Id + /** The error for the connector sync job error field. */ + error: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never, error?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never, error?: never } +} + +export interface ConnectorSyncJobErrorResponse { +} + +export interface ConnectorSyncJobGetRequest extends RequestBase { + /** The unique identifier of the connector sync job */ + connector_sync_job_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never } +} + +export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob + +export interface ConnectorSyncJobListRequest extends RequestBase { + /** Starting offset */ + from?: integer + /** Specifies a max number of results to get */ + size?: integer + /** A sync job status to fetch connector sync jobs for */ + status?: ConnectorSyncStatus + /** A connector id to fetch connector sync jobs for */ + connector_id?: Id + /** A comma-separated list of job types to fetch the sync jobs for */ + job_type?: ConnectorSyncJobType | ConnectorSyncJobType[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never, status?: never, connector_id?: never, job_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never, status?: never, connector_id?: never, job_type?: never } +} + +export interface ConnectorSyncJobListResponse { + count: long + results: ConnectorConnectorSyncJob[] +} + +export interface ConnectorSyncJobPostRequest extends RequestBase { + /** The id of the associated connector */ + id: Id + job_type?: ConnectorSyncJobType + trigger_method?: ConnectorSyncJobTriggerMethod + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, job_type?: never, trigger_method?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, job_type?: never, trigger_method?: never } +} + +export interface ConnectorSyncJobPostResponse { + id: Id +} + +export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase { + /** The unique identifier of the connector sync job. */ + connector_sync_job_id: Id + /** The number of documents the sync job deleted. */ + deleted_document_count: long + /** The number of documents the sync job indexed. */ + indexed_document_count: long + /** The total size of the data (in MiB) the sync job indexed. 
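+ *
+ * @example A hedged usage sketch (not generated; `client`, the job ID, and all counts are assumptions):
+ *
+ *   await client.connector.syncJobUpdateStats({ connector_sync_job_id: 'my-sync-job-id', deleted_document_count: 0, indexed_document_count: 1250, indexed_document_volume: 42 })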
*/ + indexed_document_volume: long + /** The timestamp to use in the `last_seen` property for the connector sync job. */ + last_seen?: Duration + /** The connector-specific metadata. */ + metadata?: Metadata + /** The total number of documents in the target index after the sync job finished. */ + total_document_count?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_sync_job_id?: never, deleted_document_count?: never, indexed_document_count?: never, indexed_document_volume?: never, last_seen?: never, metadata?: never, total_document_count?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_sync_job_id?: never, deleted_document_count?: never, indexed_document_count?: never, indexed_document_volume?: never, last_seen?: never, metadata?: never, total_document_count?: never } +} + +export interface ConnectorSyncJobUpdateStatsResponse { +} + +export interface ConnectorUpdateActiveFilteringRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never } +} + +export interface ConnectorUpdateActiveFilteringResponse { + result: Result +} + +export interface ConnectorUpdateApiKeyIdRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + api_key_id?: string + api_key_secret_id?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, api_key_id?: never, api_key_secret_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, api_key_id?: never, api_key_secret_id?: never } +} + +export interface ConnectorUpdateApiKeyIdResponse { + result: Result +} + +export interface ConnectorUpdateConfigurationRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + configuration?: ConnectorConnectorConfiguration + values?: Record<string, any> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, configuration?: never, values?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, configuration?: never, values?: never } +} + +export interface ConnectorUpdateConfigurationResponse { + result: Result +} + +export interface ConnectorUpdateErrorRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + error: SpecUtilsWithNullValue<string> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, error?: never } + /** All values in `querystring` will be added to the request querystring. 
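+ *
+ * @example Illustrative sketch (not part of the generated types; assumes an
+ * `@elastic/elasticsearch` Client instance named `client`). The `error` field also
+ * accepts `null` to clear a previously reported error:
+ *
+ *   await client.connector.updateError({ connector_id: 'my-connector', error: 'Invalid credentials' })
+ *   await client.connector.updateError({ connector_id: 'my-connector', error: null })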
*/ + querystring?: { [key: string]: any } & { connector_id?: never, error?: never } +} + +export interface ConnectorUpdateErrorResponse { + result: Result +} + +export interface ConnectorUpdateFeaturesRequest extends RequestBase { + /** The unique identifier of the connector to be updated. */ + connector_id: Id + features: ConnectorConnectorFeatures + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, features?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, features?: never } +} + +export interface ConnectorUpdateFeaturesResponse { + result: Result +} + +export interface ConnectorUpdateFilteringRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + filtering?: ConnectorFilteringConfig[] + rules?: ConnectorFilteringRule[] + advanced_snippet?: ConnectorFilteringAdvancedSnippet + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, filtering?: never, rules?: never, advanced_snippet?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, filtering?: never, rules?: never, advanced_snippet?: never } +} + +export interface ConnectorUpdateFilteringResponse { + result: Result +} + +export interface ConnectorUpdateFilteringValidationRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + validation: ConnectorFilteringRulesValidation + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, validation?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, validation?: never } +} + +export interface ConnectorUpdateFilteringValidationResponse { + result: Result +} + +export interface ConnectorUpdateIndexNameRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + index_name: SpecUtilsWithNullValue<IndexName> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, index_name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, index_name?: never } +} + +export interface ConnectorUpdateIndexNameResponse { + result: Result +} + +export interface ConnectorUpdateNameRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + name?: string + description?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, name?: never, description?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { connector_id?: never, name?: never, description?: never } +} + +export interface ConnectorUpdateNameResponse { + result: Result +} + +export interface ConnectorUpdateNativeRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + is_native: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, is_native?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, is_native?: never } +} + +export interface ConnectorUpdateNativeResponse { + result: Result +} + +export interface ConnectorUpdatePipelineRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + pipeline: ConnectorIngestPipelineParams + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, pipeline?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, pipeline?: never } +} + +export interface ConnectorUpdatePipelineResponse { + result: Result +} + +export interface ConnectorUpdateSchedulingRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + scheduling: ConnectorSchedulingConfiguration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, scheduling?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, scheduling?: never } +} + +export interface ConnectorUpdateSchedulingResponse { + result: Result +} + +export interface ConnectorUpdateServiceTypeRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + service_type: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, service_type?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, service_type?: never } +} + +export interface ConnectorUpdateServiceTypeResponse { + result: Result +} + +export interface ConnectorUpdateStatusRequest extends RequestBase { + /** The unique identifier of the connector to be updated */ + connector_id: Id + status: ConnectorConnectorStatus + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { connector_id?: never, status?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { connector_id?: never, status?: never } +} + +export interface ConnectorUpdateStatusResponse { + result: Result +} + +export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { + /** The UUID of the index to delete. Use the get dangling indices API to find the UUID. */ + index_uuid: Uuid + /** This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. 
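+ *
+ * @example Illustrative only (assumes `client`; the UUID below is a placeholder that would
+ * come from `client.danglingIndices.listDanglingIndices()`):
+ *
+ *   await client.danglingIndices.deleteDanglingIndex({ index_uuid: 'zmM4e0JtBkeUjiHD-MihPQ', accept_data_loss: true })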
*/ + accept_data_loss: boolean + /** Specify timeout for connection to master */ + master_timeout?: Duration + /** Explicit operation timeout */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } +} + +export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase + +export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { + /** The UUID of the index to import. Use the get dangling indices API to locate the UUID. */ + index_uuid: Uuid + /** This parameter must be set to true to import a dangling index. + * Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. */ + accept_data_loss: boolean + /** Specify timeout for connection to master */ + master_timeout?: Duration + /** Explicit operation timeout */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index_uuid?: never, accept_data_loss?: never, master_timeout?: never, timeout?: never } +} + +export type DanglingIndicesImportDanglingIndexResponse = AcknowledgedResponseBase + +export interface DanglingIndicesListDanglingIndicesDanglingIndex { + index_name: string + index_uuid: string + creation_date_millis: EpochTime<UnitMillis> + node_ids: Ids +} + +export interface DanglingIndicesListDanglingIndicesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface DanglingIndicesListDanglingIndicesResponse { + dangling_indices: DanglingIndicesListDanglingIndicesDanglingIndex[] +} + +export interface EnrichPolicy { + enrich_fields: Fields + indices: Indices + match_field: Field + query?: QueryDslQueryContainer + name?: Name + elasticsearch_version?: string +} + +export type EnrichPolicyType = 'geo_match' | 'match' | 'range' + +export interface EnrichSummary { + config: Partial<Record<EnrichPolicyType, EnrichPolicy>> +} + +export interface EnrichDeletePolicyRequest extends RequestBase { + /** Enrich policy to delete. */ + name: Name + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export type EnrichDeletePolicyResponse = AcknowledgedResponseBase + +export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' | 'CANCELLED' + +export interface EnrichExecutePolicyExecuteEnrichPolicyStatus { + phase: EnrichExecutePolicyEnrichPolicyPhase + step?: string +} + +export interface EnrichExecutePolicyRequest extends RequestBase { + /** Enrich policy to execute. */ + name: Name + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** If `true`, the request blocks other enrich policy execution requests until complete. */ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, wait_for_completion?: never } +} + +export interface EnrichExecutePolicyResponse { + status?: EnrichExecutePolicyExecuteEnrichPolicyStatus + task?: TaskId +} + +export interface EnrichGetPolicyRequest extends RequestBase { + /** Comma-separated list of enrich policy names used to limit the request. + * To return information for all enrich policies, omit this parameter. */ + name?: Names + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export interface EnrichGetPolicyResponse { + policies: EnrichSummary[] +} + +export interface EnrichPutPolicyRequest extends RequestBase { + /** Name of the enrich policy to create or update. */ + name: Name + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** Matches enrich data to incoming documents based on a `geo_shape` query. */ + geo_match?: EnrichPolicy + /** Matches enrich data to incoming documents based on a `term` query. */ + match?: EnrichPolicy + /** Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. */ + range?: EnrichPolicy + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, geo_match?: never, match?: never, range?: never } + /** All values in `querystring` will be added to the request querystring. 
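+ *
+ * @example Sketch of the create-then-execute flow (illustrative; `client`, the policy name,
+ * and the index/field names are assumptions, not part of the generated types):
+ *
+ *   await client.enrich.putPolicy({ name: 'users-policy', match: { indices: 'users', match_field: 'email', enrich_fields: ['first_name', 'last_name'] } })
+ *   await client.enrich.executePolicy({ name: 'users-policy' })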
*/ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, geo_match?: never, match?: never, range?: never } +} + +export type EnrichPutPolicyResponse = AcknowledgedResponseBase + +export interface EnrichStatsCacheStats { + node_id: Id + count: integer + hits: integer + hits_time_in_millis: DurationValue<UnitMillis> + misses: integer + misses_time_in_millis: DurationValue<UnitMillis> + evictions: integer + size_in_bytes: long +} + +export interface EnrichStatsCoordinatorStats { + executed_searches_total: long + node_id: Id + queue_size: integer + remote_requests_current: integer + remote_requests_total: long +} + +export interface EnrichStatsExecutingPolicy { + name: Name + task: TasksTaskInfo +} + +export interface EnrichStatsRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface EnrichStatsResponse { + /** Objects containing information about each coordinating ingest node for configured enrich processors. */ + coordinator_stats: EnrichStatsCoordinatorStats[] + /** Objects containing information about each enrich policy that is currently executing. */ + executing_policies: EnrichStatsExecutingPolicy[] + /** Objects containing information about the enrich cache stats on each ingest node. */ + cache_stats?: EnrichStatsCacheStats[] +} + +export interface EqlEqlHits<TEvent = unknown> { + /** Metadata about the number of matching events or sequences. */ + total?: SearchTotalHits + /** Contains events matching the query. Each object represents a matching event. */ + events?: EqlHitsEvent<TEvent>[] + /** Contains event sequences matching the query. Each object represents a matching sequence. This parameter is only returned for EQL queries containing a sequence. */ + sequences?: EqlHitsSequence<TEvent>[] +} + +export interface EqlEqlSearchResponseBase<TEvent = unknown> { + /** Identifier for the search. */ + id?: Id + /** If true, the response does not contain complete search results. */ + is_partial?: boolean + /** If true, the search request is still executing. */ + is_running?: boolean + /** Milliseconds it took Elasticsearch to execute the request. */ + took?: DurationValue<UnitMillis> + /** If true, the request timed out before completion. */ + timed_out?: boolean + /** Contains matching events and sequences. Also contains related metadata. */ + hits: EqlEqlHits<TEvent> + /** Contains information about shard failures (if any), in case allow_partial_search_results=true */ + shard_failures?: ShardFailure[] +} + +export interface EqlHitsEvent<TEvent = unknown> { + /** Name of the index containing the event. */ + _index: IndexName + /** Unique identifier for the event. This ID is only unique within the index. */ + _id: Id + /** Original JSON body passed for the event at index time. */ + _source: TEvent + /** Set to `true` for events in a timespan-constrained sequence that do not meet a given condition. */ + missing?: boolean + fields?: Record<Field, any[]> +} + +export interface EqlHitsSequence<TEvent = unknown> { + /** Contains events matching the query. Each object represents a matching event. */ + events: EqlHitsEvent<TEvent>[] + /** Shared field values used to constrain matches in the sequence. These are defined using the by keyword in the EQL query syntax. 
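+ *
+ * @example A sequence query returns one `join_keys` entry per `by` field (illustrative
+ * sketch; `client` and the index name are assumptions):
+ *
+ *   const resp = await client.eql.search({ index: 'my-logs', query: 'sequence by process.pid [process where true] [network where true]' })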
*/ + join_keys?: any[] +} + +export interface EqlDeleteRequest extends RequestBase { + /** Identifier for the search to delete. + * A search ID is provided in the EQL search API's response for an async search. + * A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export type EqlDeleteResponse = AcknowledgedResponseBase + +export interface EqlGetRequest extends RequestBase { + /** Identifier for the search. */ + id: Id + /** Period for which the search and its results are stored on the cluster. + * Defaults to the keep_alive value set by the search’s EQL search API request. */ + keep_alive?: Duration + /** Timeout duration to wait for the request to finish. + * Defaults to no timeout, meaning the request waits for complete search results. */ + wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, keep_alive?: never, wait_for_completion_timeout?: never } +} + +export type EqlGetResponse<TEvent = unknown> = EqlEqlSearchResponseBase<TEvent> + +export interface EqlGetStatusRequest extends RequestBase { + /** Identifier for the search. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface EqlGetStatusResponse { + /** Identifier for the search. */ + id: Id + /** If true, the response does not contain complete search results. This could be because either the search is still running (is_running status is true), or because it is already completed (is_running status is false) and results are partial due to failures or timeouts. */ + is_partial: boolean + /** If true, the search request is still executing. If false, the search is completed. */ + is_running: boolean + /** For a running search shows a timestamp when the eql search started, in milliseconds since the Unix epoch. */ + start_time_in_millis?: EpochTime<UnitMillis> + /** Shows a timestamp when the eql search will be expired, in milliseconds since the Unix epoch. When this time is reached, the search and its results are deleted, even if the search is still ongoing. */ + expiration_time_in_millis?: EpochTime<UnitMillis> + /** For a completed search shows the http status code of the completed search. */ + completion_status?: integer +} + +export interface EqlSearchRequest extends RequestBase { + /** The name of the index to scope the operation */ + index: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. 
*/ + expand_wildcards?: ExpandWildcards + /** Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution */ + ccs_minimize_roundtrips?: boolean + /** If true, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting + /** EQL query you wish to run. */ + query: string + case_sensitive?: boolean + /** Field containing the event classification, such as process, file, or network. */ + event_category_field?: Field + /** Field used to sort hits with the same timestamp in ascending order */ + tiebreaker_field?: Field + /** Field containing event timestamp. Default "@timestamp" */ + timestamp_field?: Field + /** Maximum number of events to search at a time for sequence queries. */ + fetch_size?: uint + /** Query, written in Query DSL, used to filter the events on which the EQL query runs. */ + filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + keep_alive?: Duration + keep_on_completion?: boolean + wait_for_completion_timeout?: Duration + /** Allow query execution also in case of shard failures. + * If true, the query will keep running and will return results based on the available shards. + * For sequences, the behavior can be further refined using allow_partial_sequence_results */ + allow_partial_search_results?: boolean + /** This flag applies only to sequences and has effect only if allow_partial_search_results=true. + * If true, the sequence query will return results based on the available shards, ignoring the others. + * If false, the sequence query will return successfully, but will always have empty results. */ + allow_partial_sequence_results?: boolean + /** For basic queries, the maximum number of matching events to return. Defaults to 10 */ + size?: uint + /** Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. */ + fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] + result_position?: EqlSearchResultPosition + runtime_mappings?: MappingRuntimeFields + /** By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` + * parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the + * `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ + max_samples_per_key?: integer + /** All values in `body` will be added to the request body. 
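+ *
+ * @example Minimal event query sketch (illustrative; assumes `client` and a hypothetical data stream):
+ *
+ *   const resp = await client.eql.search({ index: 'my-data-stream', query: 'process where process.name == "regsvr32.exe"', size: 5 })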
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, project_routing?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ccs_minimize_roundtrips?: never, ignore_unavailable?: never, project_routing?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } +} + +export type EqlSearchResponse<TEvent = unknown> = EqlEqlSearchResponseBase<TEvent> + +export type EqlSearchResultPosition = 'tail' | 'head' + +export interface EsqlAsyncEsqlResult extends EsqlEsqlResult { + /** The ID of the async query, to be used in subsequent requests to check the status or retrieve results. + * + * Also available in the `X-Elasticsearch-Async-Id` HTTP header. */ + id?: string + /** Indicates whether the async query is still running or has completed. + * + * Also available in the `X-Elasticsearch-Async-Is-Running` HTTP header. */ + is_running: boolean +} + +export type EsqlESQLParam = FieldValue | FieldValue[] + +export interface EsqlEsqlClusterDetails { + status: EsqlEsqlClusterStatus + indices: string + took?: DurationValue<UnitMillis> + _shards?: EsqlEsqlShardInfo + failures?: EsqlEsqlShardFailure[] +} + +export interface EsqlEsqlClusterInfo { + total: integer + successful: integer + running: integer + skipped: integer + partial: integer + failed: integer + details: Record<string, EsqlEsqlClusterDetails> +} + +export type EsqlEsqlClusterStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed' + +export interface EsqlEsqlColumnInfo { + name: string + type: string +} + +export type EsqlEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow' + +export interface EsqlEsqlResult { + took?: DurationValue<UnitMillis> + is_partial?: boolean + all_columns?: EsqlEsqlColumnInfo[] + columns: EsqlEsqlColumnInfo[] + values: FieldValue[][] + /** Cross-cluster search information. Present if `include_ccs_metadata` was `true` in the request + * and a cross-cluster search was performed. */ + _clusters?: EsqlEsqlClusterInfo + /** Profiling information. Present if `profile` was `true` in the request. + * The contents of this field are currently unstable. 
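+ *
+ * @example How a result is typically consumed (illustrative sketch; assumes `client` and a hypothetical index):
+ *
+ *   const result = await client.esql.query({ query: 'FROM my-index | LIMIT 2' })
+ *   // result.columns -> e.g. [{ name: 'message', type: 'keyword' }, ...]
+ *   // result.values  -> one array of cell values per row, in column order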
*/ + profile?: any +} + +export interface EsqlEsqlShardFailure { + shard: integer + index: IndexName | null + node?: NodeId + reason: ErrorCause +} + +export interface EsqlEsqlShardInfo { + total: integer + successful?: integer + skipped?: integer + failed?: integer +} + +export interface EsqlTableValuesContainer { + integer?: EsqlTableValuesIntegerValue[] + keyword?: EsqlTableValuesKeywordValue[] + long?: EsqlTableValuesLongValue[] + double?: EsqlTableValuesLongDouble[] +} + +export type EsqlTableValuesIntegerValue = integer | integer[] + +export type EsqlTableValuesKeywordValue = string | string[] + +export type EsqlTableValuesLongDouble = double | double[] + +export type EsqlTableValuesLongValue = long | long[] + +export interface EsqlAsyncQueryRequest extends RequestBase { + /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. + * If `false`, the query will fail if there are any failures. + * + * To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. */ + allow_partial_results?: boolean + /** The character to use between values within a CSV row. + * It is valid only for the CSV format. */ + delimiter?: string + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ + drop_null_columns?: boolean + /** A short version of the Accept header, e.g. json, yaml. + * + * `csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. + * + * For async requests, nothing will be returned if the async query doesn't finish within the timeout. + * The query ID and running status are available in the `X-Elasticsearch-Async-Id` and `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively. */ + format?: EsqlEsqlFormat + /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ + columnar?: boolean + /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */ + filter?: QueryDslQueryContainer + locale?: string + /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */ + params?: FieldValue[] + /** If provided and `true` the response will include an extra `profile` object + * with information on how the query was executed. This information is for human debugging + * and its format can change at any time but it can give some insight into the performance + * of each part of the query. */ + profile?: boolean + /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */ + query: string + /** Tables to use with the LOOKUP operation. The top level key is the table + * name and the next level key is the column name. 
*/ + tables?: Record<string, Record<string, EsqlTableValuesContainer>> + /** When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. */ + include_ccs_metadata?: boolean + /** When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. + * @alias include_ccs_metadata */ + include_execution_metadata?: boolean + /** The period to wait for the request to finish. + * By default, the request waits for 1 second for the query results. + * If the query completes during this period, results are returned. + * Otherwise, a query ID is returned that can later be used to retrieve the results. */ + wait_for_completion_timeout?: Duration + /** The period for which the query and its results are stored in the cluster. + * The default period is five days. + * When this period expires, the query and its results are deleted, even if the query is still ongoing. + * If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. */ + keep_alive?: Duration + /** Indicates whether the query and its results are stored in the cluster. + * If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ + keep_on_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, include_execution_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { allow_partial_results?: never, delimiter?: never, drop_null_columns?: never, format?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, include_execution_metadata?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never } +} + +export type EsqlAsyncQueryResponse = EsqlAsyncEsqlResult + +export interface EsqlAsyncQueryDeleteRequest extends RequestBase { + /** The unique identifier of the query. + * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export type EsqlAsyncQueryDeleteResponse = AcknowledgedResponseBase + +export interface EsqlAsyncQueryGetRequest extends RequestBase { + /** The unique identifier of the query. 
+ * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ + id: Id + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ + drop_null_columns?: boolean + /** A short version of the Accept header, for example `json` or `yaml`. */ + format?: EsqlEsqlFormat + /** The period for which the query and its results are stored in the cluster. + * When this period expires, the query and its results are deleted, even if the query is still ongoing. */ + keep_alive?: Duration + /** The period to wait for the request to finish. + * By default, the request waits for complete query results. + * If the request completes during the period specified in this parameter, complete query results are returned. + * Otherwise, the response returns an `is_running` value of `true` and no results. */ + wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } +} + +export type EsqlAsyncQueryGetResponse = EsqlAsyncEsqlResult + +export interface EsqlAsyncQueryStopRequest extends RequestBase { + /** The unique identifier of the query. + * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ + id: Id + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ + drop_null_columns?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never } +} + +export type EsqlAsyncQueryStopResponse = EsqlEsqlResult + +export interface EsqlGetQueryRequest extends RequestBase { + /** The query ID */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. 
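+ *
+ * @example Async ES|QL round trip (illustrative sketch; `client`, the query, and the timeout
+ * are assumptions, not part of the generated types):
+ *
+ *   const submitted = await client.esql.asyncQuery({ query: 'FROM logs-* | STATS count = COUNT() BY host.name', wait_for_completion_timeout: '2s' })
+ *   if (submitted.is_running && submitted.id != null) await client.esql.asyncQueryGet({ id: submitted.id })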
*/ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface EsqlGetQueryResponse { + id: long + node: NodeId + start_time_millis: long + running_time_nanos: long + query: string + coordinating_node: NodeId + data_nodes: NodeId[] +} + +export interface EsqlListQueriesBody { + id: long + node: NodeId + start_time_millis: long + running_time_nanos: long + query: string +} + +export interface EsqlListQueriesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface EsqlListQueriesResponse { + queries: Record<TaskId, EsqlListQueriesBody> +} + +export interface EsqlQueryRequest extends RequestBase { + /** A short version of the Accept header, e.g. json, yaml. + * + * `csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. */ + format?: EsqlEsqlFormat + /** The character to use between values within a CSV row. Only valid for the CSV format. */ + delimiter?: string + /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? + * Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */ + drop_null_columns?: boolean + /** If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. + * If `false`, the query will fail if there are any failures. + * + * To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. */ + allow_partial_results?: boolean + /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ + columnar?: boolean + /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */ + filter?: QueryDslQueryContainer + locale?: string + /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */ + params?: EsqlESQLParam[] + /** If provided and `true` the response will include an extra `profile` object + * with information on how the query was executed. This information is for human debugging + * and its format can change at any time but it can give some insight into the performance + * of each part of the query. */ + profile?: boolean + /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */ + query: string + /** Tables to use with the LOOKUP operation. The top level key is the table + * name and the next level key is the column name. */ + tables?: Record<string, Record<string, EsqlTableValuesContainer>> + /** When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. 
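+ *
+ * @example Parameterized query sketch (illustrative; `client` and the index are assumptions):
+ *
+ *   const resp = await client.esql.query({ query: 'FROM library | WHERE author == ? | LIMIT 10', params: ['Isaac Asimov'] })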
*/ + include_ccs_metadata?: boolean + /** When set to `true` and performing a cross-cluster/cross-project query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. + * @alias include_ccs_metadata */ + include_execution_metadata?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, include_execution_metadata?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, allow_partial_results?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, include_execution_metadata?: never } +} + +export type EsqlQueryResponse = EsqlEsqlResult + +export interface FeaturesFeature { + name: string + description: string +} + +export interface FeaturesGetFeaturesRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface FeaturesGetFeaturesResponse { + features: FeaturesFeature[] +} + +export interface FeaturesResetFeaturesRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface FeaturesResetFeaturesResponse { + features: FeaturesFeature[] +} + +export type FleetCheckpoint = long + +export interface FleetGlobalCheckpointsRequest extends RequestBase { + /** A single index or index alias that resolves to a single index. */ + index: IndexName | IndexAlias + /** A boolean value which controls whether to wait (until the timeout) for the global checkpoints + * to advance past the provided `checkpoints`. */ + wait_for_advance?: boolean + /** A boolean value which controls whether to wait (until the timeout) for the target index to exist + * and all primary shards be active. Can only be true when `wait_for_advance` is true. */ + wait_for_index?: boolean + /** A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, + * the API will only return once the global checkpoints advance past the checkpoints. Providing an empty list + * will cause Elasticsearch to immediately return the current global checkpoints. */ + checkpoints?: FleetCheckpoint[] + /** Period to wait for the global checkpoints to advance past `checkpoints`. */ + timeout?: Duration + /** All values in `body` will be added to the request body. 
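+ *
+ * @example Illustrative sketch (assumes `client`, a hypothetical index, and placeholder checkpoint values):
+ *
+ *   const resp = await client.fleet.globalCheckpoints({ index: 'my-index', wait_for_advance: true, checkpoints: [17], timeout: '30s' })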
*/ + body?: string | { [key: string]: any } & { index?: never, wait_for_advance?: never, wait_for_index?: never, checkpoints?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, wait_for_advance?: never, wait_for_index?: never, checkpoints?: never, timeout?: never } +} + +export interface FleetGlobalCheckpointsResponse { + global_checkpoints: FleetCheckpoint[] + timed_out: boolean +} + +export interface FleetMsearchRequest extends RequestBase { + /** A single target to search. If the target is an index alias, it must resolve to a single index. */ + index?: IndexName | IndexAlias + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ + allow_no_indices?: boolean + /** If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. */ + ccs_minimize_roundtrips?: boolean + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */ + expand_wildcards?: ExpandWildcards + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ + ignore_throttled?: boolean + /** If true, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + /** Maximum number of concurrent searches the multi search API can execute. */ + max_concurrent_searches?: integer + /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ + max_concurrent_shard_requests?: integer + /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */ + pre_filter_shard_size?: long + /** Indicates whether global term and document frequencies should be used when scoring returned documents. */ + search_type?: SearchType + /** If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. */ + rest_total_hits_as_int?: boolean + /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */ + typed_keys?: boolean + /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard + * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause + * Elasticsearch to immediately execute the search. */ + wait_for_checkpoints?: FleetCheckpoint[] + /** If true, returns partial results if there are shard request timeouts or shard failures. + * If false, returns an error with no partial results. + * Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. */ + allow_partial_search_results?: boolean + searches?: MsearchRequestItem[] + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, searches?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, searches?: never } +} + +export interface FleetMsearchResponse<TDocument = unknown> { + docs: MsearchResponseItem<TDocument>[] +} + +export interface FleetSearchRequest extends RequestBase { + /** A single target to search. If the target is an index alias, it must resolve to a single index. */ + index: IndexName | IndexAlias + allow_no_indices?: boolean + analyzer?: string + analyze_wildcard?: boolean + batched_reduce_size?: long + ccs_minimize_roundtrips?: boolean + default_operator?: QueryDslOperator + df?: string + expand_wildcards?: ExpandWildcards + ignore_throttled?: boolean + ignore_unavailable?: boolean + lenient?: boolean + max_concurrent_shard_requests?: integer + preference?: string + pre_filter_shard_size?: long + request_cache?: boolean + routing?: Routing + scroll?: Duration + search_type?: SearchType + /** Specifies which field to use for suggestions. */ + suggest_field?: Field + suggest_mode?: SuggestMode + suggest_size?: long + /** The source text for which the suggestions should be returned. */ + suggest_text?: string + typed_keys?: boolean + rest_total_hits_as_int?: boolean + _source_excludes?: Fields + _source_includes?: Fields + q?: string + /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard + * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause + * Elasticsearch to immediately execute the search. */ + wait_for_checkpoints?: FleetCheckpoint[] + /** If true, returns partial results if there are shard request timeouts or shard failures. + * If false, returns an error with no partial results. + * Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. */ + allow_partial_search_results?: boolean + aggregations?: Record<string, AggregationsAggregationContainer> + /** @alias aggregations */ + aggs?: Record<string, AggregationsAggregationContainer> + collapse?: SearchFieldCollapse + /** If true, returns detailed information about score computation as part of a hit. */ + explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ + ext?: Record<string, any> + /** Starting document offset. By default, you cannot page through more than 10,000 + * hits using the from and size parameters. To page through more hits, use the + * search_after parameter. */ + from?: integer + highlight?: SearchHighlight + /** Number of hits matching the query to count accurately. If true, the exact + * number of hits is returned at the cost of some performance. If false, the + * response does not include the total number of hits matching the query. 
+ * Defaults to 10,000 hits. */ + track_total_hits?: SearchTrackHits + /** Boosts the _score of documents from specified indices. */ + indices_boost?: Partial<Record<IndexName, double>>[] + /** Array of wildcard (*) patterns. The request returns doc values for field + * names matching these patterns in the hits.fields property of the response. */ + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** Minimum _score for matching documents. Documents with a lower _score are + * not included in search results and results collected by aggregations. */ + min_score?: double + post_filter?: QueryDslQueryContainer + profile?: boolean + /** Defines the search definition using the Query DSL. */ + query?: QueryDslQueryContainer + rescore?: SearchRescore | SearchRescore[] + /** Retrieve a script evaluation (based on different fields) for each hit. */ + script_fields?: Record<string, ScriptField> + search_after?: SortResults + /** The number of hits to return. By default, you cannot page through more + * than 10,000 hits using the from and size parameters. To page through more + * hits, use the search_after parameter. */ + size?: integer + slice?: SlicedScroll + sort?: Sort + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the hits._source property of the search response. */ + _source?: SearchSourceConfig + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ + fields?: (QueryDslFieldAndFormat | Field)[] + suggest?: SearchSuggester + /** Maximum number of documents to collect for each shard. If a query reaches this + * limit, Elasticsearch terminates the query early. Elasticsearch collects documents + * before sorting. Defaults to 0, which does not terminate query execution early. */ + terminate_after?: long + /** Specifies the period of time to wait for a response from each shard. If no response + * is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ + timeout?: string + /** If true, calculate and return document scores, even if the scores are not used for sorting. */ + track_scores?: boolean + /** If true, returns document version as part of a hit. */ + version?: boolean + /** If true, returns sequence number and primary term of the last modification + * of each hit. See Optimistic concurrency control. */ + seq_no_primary_term?: boolean + /** List of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the _source + * parameter defaults to false. You can pass _source: true to return both source fields + * and stored fields in the search response. */ + stored_fields?: Fields + /** Limits the search to a point in time (PIT). If you provide a PIT, you + * cannot specify an `<index>` in the request path. */ + pit?: SearchPointInTimeReference + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ + runtime_mappings?: MappingRuntimeFields + /** Stats groups to associate with the search. Each group maintains a statistics + * aggregation for its associated searches. You can retrieve these stats using + * the indices stats API. */ + stats?: string[] + /** All values in `body` will be added to the request body. 
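+ *
+ * @example Waiting on a checkpoint before searching (illustrative sketch; `client`, the
+ * index, and the checkpoint value are assumptions):
+ *
+ *   const resp = await client.fleet.search({ index: 'my-index', wait_for_checkpoints: [17], query: { match_all: {} } })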
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } +} + +export interface FleetSearchResponse { + took: long + timed_out: boolean + _shards: ShardStatistics + hits: SearchHitsMetadata + aggregations?: Record + _clusters?: ClusterStatistics + fields?: Record + max_score?: double + num_reduce_phases?: long + profile?: SearchProfile + pit_id?: Id + _scroll_id?: ScrollId + suggest?: Record[]> + terminated_early?: boolean +} + +export interface GraphConnection { + doc_count: long + source: long + target: long + weight: double +} + +export interface GraphExploreControls { + /** To avoid the top-matching documents sample being dominated by a single source of results, it is sometimes necessary to request diversity in the sample. 
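
For orientation, a minimal sketch of how the Fleet search types above map onto the client's `fleet` namespace. The index name, checkpoint value, and credentials are placeholders; it assumes a single-shard target so `wait_for_checkpoints` has one entry.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200', auth: { apiKey: '<api-key>' } })

// Hold the search until checkpoint 42 is visible on the shard instead of
// searching possibly stale data, and fail rather than return partial results.
const result = await client.fleet.search({
  index: '.fleet-agents-7',
  wait_for_checkpoints: [42],
  allow_partial_search_results: false,
  query: { match_all: {} },
  size: 10
})
console.log(result.hits.hits)
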
+
+export interface GraphConnection {
+  doc_count: long
+  source: long
+  target: long
+  weight: double
+}
+
+export interface GraphExploreControls {
+  /** To avoid the top-matching documents sample being dominated by a single source of results, it is sometimes necessary to request diversity in the sample.
+   * You can do this by selecting a single-value field and setting a maximum number of documents per value for that field. */
+  sample_diversity?: GraphSampleDiversity
+  /** Each hop considers a sample of the best-matching documents on each shard.
+   * Using samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms.
+   * Very small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms.
+   * Very large sample sizes can dilute the quality of the results and increase execution times. */
+  sample_size?: integer
+  /** The length of time in milliseconds after which exploration will be halted and the results gathered so far are returned.
+   * This timeout is honored on a best-effort basis.
+   * Execution might overrun this timeout if, for example, a long pause is encountered while FieldData is loaded for a field. */
+  timeout?: Duration
+  /** Filters associated terms so only those that are significantly associated with your query are included. */
+  use_significance: boolean
+}
+
+export interface GraphHop {
+  /** Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */
+  connections?: GraphHop
+  /** An optional guiding query that constrains the Graph API as it explores connected terms. */
+  query?: QueryDslQueryContainer
+  /** Contains the fields you are interested in. */
+  vertices: GraphVertexDefinition[]
+}
+
+export interface GraphSampleDiversity {
+  field: Field
+  max_docs_per_value: integer
+}
+
+export interface GraphVertex {
+  depth: long
+  field: Field
+  term: string
+  weight: double
+}
+
+export interface GraphVertexDefinition {
+  /** Prevents the specified terms from being included in the results. */
+  exclude?: string[]
+  /** Identifies a field in the documents of interest. */
+  field: Field
+  /** Identifies the terms of interest that form the starting points from which you want to spider out. */
+  include?: (GraphVertexInclude | string)[]
+  /** Specifies how many documents must contain a pair of terms before it is considered to be a useful connection.
+   * This setting acts as a certainty threshold. */
+  min_doc_count?: long
+  /** Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration. */
+  shard_min_doc_count?: long
+  /** Specifies the maximum number of vertex terms returned for each field. */
+  size?: integer
+}
+
+export interface GraphVertexInclude {
+  boost?: double
+  term: string
+}
+
+export interface GraphExploreRequest extends RequestBase {
+  /** Name of the index. */
+  index: Indices
+  /** Custom value used to route operations to a specific shard. */
+  routing?: Routing
+  /** Specifies the period of time to wait for a response from each shard.
+   * If no response is received before the timeout expires, the request fails and returns an error.
+   * Defaults to no timeout. */
+  timeout?: Duration
+  /** Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */
+  connections?: GraphHop
+  /** Direct the Graph API how to build the graph. */
+  controls?: GraphExploreControls
+  /** A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. */
+  query?: QueryDslQueryContainer
+  /** Specifies one or more fields that contain the terms you want to include in the graph as vertices. */
+  vertices?: GraphVertexDefinition[]
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, routing?: never, timeout?: never, connections?: never, controls?: never, query?: never, vertices?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, routing?: never, timeout?: never, connections?: never, controls?: never, query?: never, vertices?: never }
+}
+
+export interface GraphExploreResponse {
+  connections: GraphConnection[]
+  failures: ShardFailure[]
+  timed_out: boolean
+  took: long
+  vertices: GraphVertex[]
+}
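
A minimal sketch of a graph exploration built from the types above, reusing the `client` from the Fleet sketch; the `clicklogs` index and its `product` and `query.raw` fields are invented for illustration.

// Find products significantly connected to the query term, then follow one
// hop back to related query terms from the last 90 days.
const graph = await client.graph.explore({
  index: 'clicklogs',
  query: { match: { 'query.raw': 'midi' } },
  vertices: [{ field: 'product', min_doc_count: 3, size: 5 }],
  connections: {
    vertices: [{ field: 'query.raw', size: 5 }],
    query: { range: { '@timestamp': { gte: 'now-90d' } } }
  },
  controls: { use_significance: true, sample_size: 2000, timeout: '2s' }
})
for (const v of graph.vertices) console.log(v.field, v.term, v.weight)
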
+
+export interface IlmActions {
+  /** Phases allowed: warm, cold. */
+  allocate?: IlmAllocateAction
+  /** Phases allowed: delete. */
+  delete?: IlmDeleteAction
+  /** Phases allowed: hot, warm, cold. */
+  downsample?: IlmDownsampleAction
+  /** The freeze action is a noop in 8.x */
+  freeze?: EmptyObject
+  /** Phases allowed: hot, warm. */
+  forcemerge?: IlmForceMergeAction
+  /** Phases allowed: warm, cold. */
+  migrate?: IlmMigrateAction
+  /** Phases allowed: hot, warm, cold. */
+  readonly?: EmptyObject
+  /** Phases allowed: hot. */
+  rollover?: IlmRolloverAction
+  /** Phases allowed: hot, warm, cold. */
+  set_priority?: IlmSetPriorityAction
+  /** Phases allowed: hot, cold, frozen. */
+  searchable_snapshot?: IlmSearchableSnapshotAction
+  /** Phases allowed: hot, warm. */
+  shrink?: IlmShrinkAction
+  /** Phases allowed: hot, warm, cold, frozen. */
+  unfollow?: EmptyObject
+  /** Phases allowed: delete. */
+  wait_for_snapshot?: IlmWaitForSnapshotAction
+}
+
+export interface IlmAllocateAction {
+  number_of_replicas?: integer
+  total_shards_per_node?: integer
+  include?: Record<string, string>
+  exclude?: Record<string, string>
+  require?: Record<string, string>
+}
+
+export interface IlmDeleteAction {
+  delete_searchable_snapshot?: boolean
+}
+
+export interface IlmDownsampleAction {
+  fixed_interval: DurationLarge
+  wait_timeout?: Duration
+}
+
+export interface IlmForceMergeAction {
+  max_num_segments: integer
+  index_codec?: string
+}
+
+export interface IlmMigrateAction {
+  enabled?: boolean
+}
+
+export interface IlmPhase {
+  actions?: IlmActions
+  min_age?: Duration
+}
+
+export interface IlmPhases {
+  cold?: IlmPhase
+  delete?: IlmPhase
+  frozen?: IlmPhase
+  hot?: IlmPhase
+  warm?: IlmPhase
+}
+
+export interface IlmPolicy {
+  phases: IlmPhases
+  /** Arbitrary metadata that is not automatically generated or used by Elasticsearch. */
+  _meta?: Metadata
+}
+
+export interface IlmRolloverAction {
+  /** The `max_size` condition has been deprecated in 9.3.0 and `max_primary_shard_size` should be used instead */
+  max_size?: ByteSize
+  max_primary_shard_size?: ByteSize
+  max_age?: Duration
+  max_docs?: long
+  max_primary_shard_docs?: long
+  min_size?: ByteSize
+  min_primary_shard_size?: ByteSize
+  min_age?: Duration
+  min_docs?: long
+  min_primary_shard_docs?: long
+}
+
+export interface IlmSearchableSnapshotAction {
+  snapshot_repository: string
+  force_merge_index?: boolean
+}
+
+export interface IlmSetPriorityAction {
+  priority?: integer
+}
+
+export interface IlmShrinkAction {
+  number_of_shards?: integer
+  max_primary_shard_size?: ByteSize
+  allow_write_after_shrink?: boolean
+}
+
+export interface IlmWaitForSnapshotAction {
+  policy: string
+}
+
+export interface IlmDeleteLifecycleRequest extends RequestBase {
+  /** Identifier for the policy. */
+  name: Name
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+  master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+  timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never }
+}
+
+export type IlmDeleteLifecycleResponse = AcknowledgedResponseBase
+
+export type IlmExplainLifecycleLifecycleExplain = IlmExplainLifecycleLifecycleExplainManaged | IlmExplainLifecycleLifecycleExplainUnmanaged
+
+export interface IlmExplainLifecycleLifecycleExplainManaged {
+  action?: Name
+  action_time?: DateTime
+  action_time_millis?: EpochTime<UnitMillis>
+  age?: Duration
+  /** @remarks This property is not supported on Elastic Cloud Serverless. */
+  age_in_millis?: DurationValue<UnitMillis>
+  failed_step?: Name
+  failed_step_retry_count?: integer
+  index: IndexName
+  index_creation_date?: DateTime
+  index_creation_date_millis?: EpochTime<UnitMillis>
+  is_auto_retryable_error?: boolean
+  lifecycle_date?: DateTime
+  lifecycle_date_millis?: EpochTime<UnitMillis>
+  managed: true
+  phase?: Name
+  phase_time?: DateTime
+  phase_time_millis?: EpochTime<UnitMillis>
+  policy?: Name
+  previous_step_info?: Record<string, any>
+  repository_name?: string
+  snapshot_name?: string
+  shrink_index_name?: string
+  step?: Name
+  step_info?: Record<string, any>
+  step_time?: DateTime
+  step_time_millis?: EpochTime<UnitMillis>
+  phase_execution?: IlmExplainLifecycleLifecycleExplainPhaseExecution
+  time_since_index_creation?: Duration
+  skip: boolean
+}
+
+export interface IlmExplainLifecycleLifecycleExplainPhaseExecution {
+  phase_definition?: IlmPhase
+  policy: Name
+  version: VersionNumber
+  modified_date_in_millis: EpochTime<UnitMillis>
+}
+
+export interface IlmExplainLifecycleLifecycleExplainUnmanaged {
+  index: IndexName
+  managed: false
+}
+
+export interface IlmExplainLifecycleRequest extends RequestBase {
+  /** Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`).
+   * To target all data streams and indices, use `*` or `_all`. */
+  index: IndexName
+  /** Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist. */
+  only_errors?: boolean
+  /** Filters the returned indices to only indices that are managed by ILM. */
+  only_managed?: boolean
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+  master_timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, only_errors?: never, only_managed?: never, master_timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, only_errors?: never, only_managed?: never, master_timeout?: never }
+}
+
+export interface IlmExplainLifecycleResponse {
+  indices: Record<IndexName, IlmExplainLifecycleLifecycleExplain>
+}
+
+export interface IlmGetLifecycleLifecycle {
+  modified_date: DateTime
+  policy: IlmPolicy
+  version: VersionNumber
+}
+
+export interface IlmGetLifecycleRequest extends RequestBase {
+  /** Identifier for the policy. */
+  name?: Name
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+  master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+  timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never }
+}
+
+export type IlmGetLifecycleResponse = Record<string, IlmGetLifecycleLifecycle>
+
+export interface IlmGetStatusRequest extends RequestBase {
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any }
+}
+
+export interface IlmGetStatusResponse {
+  operation_mode: LifecycleOperationMode
+}
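
The `managed: true | false` literals above form a discriminated union that narrows cleanly in TypeScript. A small sketch, reusing `client`; the `logs-*` pattern is a placeholder.

// List ILM-managed indices that are stuck in an error state.
const explain = await client.ilm.explainLifecycle({ index: 'logs-*', only_errors: true })
for (const [name, state] of Object.entries(explain.indices)) {
  if (state.managed) console.log(name, state.phase, state.failed_step)
}
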
+
+export interface IlmMigrateToDataTiersRequest extends RequestBase {
+  /** If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration.
+   * This provides a way to retrieve the indices and ILM policies that need to be migrated. */
+  dry_run?: boolean
+  /** The period to wait for a connection to the master node.
+   * If no response is received before the timeout expires, the request fails and returns an error.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
+  master_timeout?: Duration
+  legacy_template_to_delete?: string
+  node_attribute?: string
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { dry_run?: never, master_timeout?: never, legacy_template_to_delete?: never, node_attribute?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { dry_run?: never, master_timeout?: never, legacy_template_to_delete?: never, node_attribute?: never }
+}
+
+export interface IlmMigrateToDataTiersResponse {
+  dry_run: boolean
+  /** The name of the legacy index template that was deleted.
+   * This information is missing if no legacy index templates were deleted. */
+  removed_legacy_template: string
+  /** The ILM policies that were updated. */
+  migrated_ilm_policies: string[]
+  /** The indices that were migrated to tier preference routing. */
+  migrated_indices: Indices
+  /** The legacy index templates that were updated to not contain custom routing settings for the provided data attribute. */
+  migrated_legacy_templates: string[]
+  /** The composable index templates that were updated to not contain custom routing settings for the provided data attribute. */
+  migrated_composable_templates: string[]
+  /** The component templates that were updated to not contain custom routing settings for the provided data attribute. */
+  migrated_component_templates: string[]
+}
+
+export interface IlmMoveToStepRequest extends RequestBase {
+  /** The name of the index whose lifecycle step is to change. */
+  index: IndexName
+  /** The step that the index is expected to be in. */
+  current_step: IlmMoveToStepStepKey
+  /** The step that you want to run. */
+  next_step: IlmMoveToStepStepKey
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, current_step?: never, next_step?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, current_step?: never, next_step?: never }
+}
+
+export type IlmMoveToStepResponse = AcknowledgedResponseBase
+
+export interface IlmMoveToStepStepKey {
+  /** The optional action to which the index will be moved. */
+  action?: string
+  /** The optional step name to which the index will be moved. */
+  name?: string
+  phase: string
+}
+
+export interface IlmPutLifecycleRequest extends RequestBase {
+  /** Identifier for the policy. */
+  name: Name
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+  master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+  timeout?: Duration
+  policy?: IlmPolicy
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, policy?: never }
+}
+
+export type IlmPutLifecycleResponse = AcknowledgedResponseBase
+
+export interface IlmRemovePolicyRequest extends RequestBase {
+  /** The name of the index to remove the policy from. */
+  index: IndexName
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never }
+}
+
+export interface IlmRemovePolicyResponse {
+  failed_indexes: IndexName[]
+  has_failures: boolean
+}
+
+export interface IlmRetryRequest extends RequestBase {
+  /** The name of the indices (comma-separated) whose failed lifecycle step is to be retried. */
+  index: IndexName
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never }
+}
+
+export type IlmRetryResponse = AcknowledgedResponseBase
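
A sketch of the write side of these ILM types, reusing `client`; the policy and index names are placeholders.

// Create a policy that rolls over hot indices and deletes them after 90 days,
// then retry a failed lifecycle step on one concrete backing index.
await client.ilm.putLifecycle({
  name: 'logs-policy',
  policy: {
    phases: {
      hot: { actions: { rollover: { max_primary_shard_size: '50gb', max_age: '30d' } } },
      delete: { min_age: '90d', actions: { delete: {} } }
    }
  }
})
await client.ilm.retry({ index: 'logs-000007' })
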
+
+export interface IlmStartRequest extends RequestBase {
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+  master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+  timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never }
+}
+
+export type IlmStartResponse = AcknowledgedResponseBase
+
+export interface IlmStopRequest extends RequestBase {
+  /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+  master_timeout?: Duration
+  /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+  timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never }
+}
+
+export type IlmStopResponse = AcknowledgedResponseBase
+
+export interface IndicesAlias {
+  /** Query used to limit documents the alias can access. */
+  filter?: QueryDslQueryContainer
+  /** Value used to route indexing operations to a specific shard.
+   * If specified, this overwrites the `routing` value for indexing operations. */
+  index_routing?: Routing
+  /** If `true`, the alias is hidden.
+   * All indices for the alias must have the same `is_hidden` value. */
+  is_hidden?: boolean
+  /** If `true`, the index is the write index for the alias. */
+  is_write_index?: boolean
+  /** Value used to route indexing and search operations to a specific shard. */
+  routing?: Routing
+  /** Value used to route search operations to a specific shard.
+   * If specified, this overwrites the `routing` value for search operations. */
+  search_routing?: Routing
+}
+
+export interface IndicesAliasDefinition {
+  /** Query used to limit documents the alias can access. */
+  filter?: QueryDslQueryContainer
+  /** Value used to route indexing operations to a specific shard.
+   * If specified, this overwrites the `routing` value for indexing operations. */
+  index_routing?: string
+  /** If `true`, the index is the write index for the alias. */
+  is_write_index?: boolean
+  /** Value used to route indexing and search operations to a specific shard. */
+  routing?: string
+  /** Value used to route search operations to a specific shard.
+   * If specified, this overwrites the `routing` value for search operations. */
+  search_routing?: string
+  /** If `true`, the alias is hidden.
+   * All indices for the alias must have the same `is_hidden` value. */
+  is_hidden?: boolean
+}
+
+export interface IndicesCacheQueries {
+  enabled: boolean
+}
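
Alias options such as `is_write_index` above are most often exercised through the update-aliases API. A minimal sketch, reusing `client`, with placeholder index names:

// Atomically move the write alias from the old backing index to the new one.
await client.indices.updateAliases({
  actions: [
    { remove: { index: 'logs-000007', alias: 'logs-write' } },
    { add: { index: 'logs-000008', alias: 'logs-write', is_write_index: true } }
  ]
})
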
+
+export interface IndicesDataStream {
+  /** Custom metadata for the stream, copied from the `_meta` object of the stream’s matching index template.
+   * If empty, the response omits this property. */
+  _meta?: Metadata
+  /** If `true`, the data stream allows custom routing on write requests. */
+  allow_custom_routing?: boolean
+  /** Information about failure store backing indices. */
+  failure_store?: IndicesFailureStore
+  /** Current generation for the data stream. This number acts as a cumulative count of the stream’s rollovers, starting at 1. */
+  generation: integer
+  /** If `true`, the data stream is hidden. */
+  hidden: boolean
+  /** Name of the current ILM lifecycle policy in the stream’s matching index template.
+   * This lifecycle policy is set in the `index.lifecycle.name` setting.
+   * If the template does not include a lifecycle policy, this property is not included in the response.
+   * NOTE: A data stream’s backing indices may be assigned different lifecycle policies. To retrieve the lifecycle policy for individual backing indices, use the get index settings API. */
+  ilm_policy?: Name
+  /** Name of the lifecycle system that'll manage the next generation of the data stream. */
+  next_generation_managed_by: IndicesManagedBy
+  /** Indicates if ILM should take precedence over DSL in case both are configured to manage this data stream. */
+  prefer_ilm: boolean
+  /** Array of objects containing information about the data stream’s backing indices.
+   * The last item in this array contains information about the stream’s current write index. */
+  indices: IndicesDataStreamIndex[]
+  /** Contains the configuration for the data stream lifecycle of this data stream. */
+  lifecycle?: IndicesDataStreamLifecycleWithRollover
+  /** Name of the data stream. */
+  name: DataStreamName
+  /** If `true`, the data stream is created and managed by cross-cluster replication and the local cluster can not write into this data stream or change its mappings. */
+  replicated?: boolean
+  /** If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. If the rollover fails the indexing request will fail too. */
+  rollover_on_write: boolean
+  /** The settings specific to this data stream that will take precedence over the settings in the matching index
+   * template. */
+  settings: IndicesIndexSettings
+  /** The mappings specific to this data stream that will take precedence over the mappings in the matching index
+   * template. */
+  mappings?: MappingTypeMapping
+  /** Health status of the data stream.
+   * This health status is based on the state of the primary and replica shards of the stream’s backing indices. */
+  status: HealthStatus
+  /** If `true`, the data stream is created and managed by an Elastic stack component and cannot be modified through normal user interaction. */
+  system?: boolean
+  /** Name of the index template used to create the data stream’s backing indices.
+   * The template’s index pattern must match the name of this data stream. */
+  template: Name
+  /** Information about the `@timestamp` field in the data stream. */
+  timestamp_field: IndicesDataStreamTimestampField
+  /** The index mode for the data stream that will be used for newly created backing indices. */
+  index_mode?: IndicesIndexMode
+}
+
+export interface IndicesDataStreamFailureStore {
+  /** If defined, it turns the failure store on/off (`true`/`false`) for this data stream. A data stream failure store
+   * that's disabled (enabled: `false`) will redirect no new failed indices to the failure store; however, it will
+   * not remove any existing data from the failure store. */
+  enabled?: boolean
+  /** If defined, it specifies the lifecycle configuration for the failure store of this data stream. */
+  lifecycle?: IndicesFailureStoreLifecycle
+}
+
+export interface IndicesDataStreamFailureStoreTemplate {
+  /** If defined, it turns the failure store on/off (`true`/`false`) for this data stream. A data stream failure store
+   * that's disabled (enabled: `false`) will redirect no new failed indices to the failure store; however, it will
+   * not remove any existing data from the failure store. */
+  enabled?: boolean | null
+  /** If defined, it specifies the lifecycle configuration for the failure store of this data stream. */
+  lifecycle?: IndicesFailureStoreLifecycleTemplate | null
+}
+
+export interface IndicesDataStreamIndex {
+  /** Name of the backing index. */
+  index_name: IndexName
+  /** Universally unique identifier (UUID) for the index. */
+  index_uuid: Uuid
+  /** Name of the current ILM lifecycle policy configured for this backing index. */
+  ilm_policy?: Name
+  /** Name of the lifecycle system that's currently managing this backing index. */
+  managed_by?: IndicesManagedBy
+  /** Indicates if ILM should take precedence over DSL in case both are configured to manage this index. */
+  prefer_ilm?: boolean
+  /** The index mode of this backing index of the data stream. */
+  index_mode?: IndicesIndexMode
+}
+
+export interface IndicesDataStreamLifecycle {
+  /** If defined, every document added to this data stream will be stored at least for this time frame.
+   * Any time after this duration the document could be deleted.
+   * When empty, every document in this data stream will be stored indefinitely. */
+  data_retention?: Duration
+  /** The downsampling configuration to execute for the managed backing index after rollover. */
+  downsampling?: IndicesDataStreamLifecycleDownsampling
+  /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
+   * that's disabled (enabled: `false`) will have no effect on the data stream. */
+  enabled?: boolean
+}
+
+export interface IndicesDataStreamLifecycleDownsampling {
+  /** The list of downsampling rounds to execute as part of this downsampling configuration */
+  rounds: IndicesDownsamplingRound[]
+}
+
+export interface IndicesDataStreamLifecycleRolloverConditions {
+  min_age?: Duration
+  max_age?: string
+  min_docs?: long
+  max_docs?: long
+  min_size?: ByteSize
+  max_size?: ByteSize
+  min_primary_shard_size?: ByteSize
+  max_primary_shard_size?: ByteSize
+  min_primary_shard_docs?: long
+  max_primary_shard_docs?: long
+}
+
+export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStreamLifecycle {
+  /** The conditions which will trigger the rollover of a backing index as configured by the cluster setting `cluster.lifecycle.default.rollover`.
+   * This property is an implementation detail and it will only be retrieved when the query param `include_defaults` is set to true.
+   * The contents of this field are subject to change. */
+  rollover?: IndicesDataStreamLifecycleRolloverConditions
+}
+
+export interface IndicesDataStreamOptions {
+  /** If defined, it specifies configuration for the failure store of this data stream. */
+  failure_store?: IndicesDataStreamFailureStore
+}
+
+export interface IndicesDataStreamOptionsTemplate {
+  failure_store?: IndicesDataStreamFailureStoreTemplate | null
+}
+
+export interface IndicesDataStreamTimestampField {
+  /** Name of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream. */
+  name: Field
+}
+
+export interface IndicesDataStreamVisibility {
+  hidden?: boolean
+  allow_custom_routing?: boolean
+}
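
A sketch of how `IndicesDataStreamLifecycle` (retention plus downsampling rounds) is typically applied to an existing data stream, reusing `client`. The stream name is a placeholder, and passing `downsampling` here assumes the put-data-lifecycle API accepts the same shape as the type above.

// Keep documents for at least 30 days; downsample to 1h buckets a week after rollover.
await client.indices.putDataLifecycle({
  name: 'metrics-app-default',
  data_retention: '30d',
  downsampling: { rounds: [{ after: '7d', fixed_interval: '1h' }] }
})
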
+
+export interface IndicesDownsampleConfig {
+  /** The interval at which to aggregate the original time series index. */
+  fixed_interval: DurationLarge
+  /** The sampling method used to reduce the documents; it can be either `aggregate` or `last_value`. Defaults to `aggregate`. */
+  sampling_method?: IndicesSamplingMethod
+}
+
+export interface IndicesDownsamplingRound {
+  /** The duration since rollover when this downsampling round should execute. */
+  after: Duration
+  /** The downsample interval. */
+  fixed_interval: DurationLarge
+}
+
+export interface IndicesFailureStore {
+  enabled: boolean
+  indices: IndicesDataStreamIndex[]
+  rollover_on_write: boolean
+}
+
+export interface IndicesFailureStoreLifecycle {
+  /** If defined, every document added to this data stream will be stored at least for this time frame.
+   * Any time after this duration the document could be deleted.
+   * When empty, every document in this data stream will be stored indefinitely. */
+  data_retention?: Duration
+  /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
+   * that's disabled (enabled: `false`) will have no effect on the data stream. */
+  enabled?: boolean
+}
+
+export interface IndicesFailureStoreLifecycleTemplate {
+  /** If defined, every document added to this data stream will be stored at least for this time frame.
+   * Any time after this duration the document could be deleted.
+   * When empty, every document in this data stream will be stored indefinitely. */
+  data_retention?: Duration | null
+  /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
+   * that's disabled (enabled: `false`) will have no effect on the data stream. */
+  enabled?: boolean
+}
+
+export interface IndicesFielddataFrequencyFilter {
+  max: double
+  min: double
+  min_segment_size: integer
+}
+
+export type IndicesIndexCheckOnStartup = boolean | 'true' | 'false' | 'checksum'
+
+export type IndicesIndexMode = 'standard' | 'time_series' | 'logsdb' | 'lookup'
+
+export interface IndicesIndexRouting {
+  allocation?: IndicesIndexRoutingAllocation
+  rebalance?: IndicesIndexRoutingRebalance
+}
+
+export interface IndicesIndexRoutingAllocation {
+  enable?: IndicesIndexRoutingAllocationOptions
+  include?: IndicesIndexRoutingAllocationInclude
+  initial_recovery?: IndicesIndexRoutingAllocationInitialRecovery
+  disk?: IndicesIndexRoutingAllocationDisk
+}
+
+export interface IndicesIndexRoutingAllocationDisk {
+  threshold_enabled?: boolean | string
+}
+
+export interface IndicesIndexRoutingAllocationInclude {
+  _tier_preference?: string
+  _id?: Id
+}
+
+export interface IndicesIndexRoutingAllocationInitialRecovery {
+  _id?: Id
+}
+
+export type IndicesIndexRoutingAllocationOptions = 'all' | 'primaries' | 'new_primaries' | 'none'
+
+export interface IndicesIndexRoutingRebalance {
+  enable: IndicesIndexRoutingRebalanceOptions
+}
+
+export type IndicesIndexRoutingRebalanceOptions = 'all' | 'primaries' | 'replicas' | 'none'
+
+export interface IndicesIndexSegmentSort {
+  field?: Fields
+  order?: IndicesSegmentSortOrder | IndicesSegmentSortOrder[]
+  mode?: IndicesSegmentSortMode | IndicesSegmentSortMode[]
+  missing?: IndicesSegmentSortMissing | IndicesSegmentSortMissing[]
+}
+
+export interface IndicesIndexSettingBlocks {
+  read_only?: SpecUtilsStringified<boolean>
+  read_only_allow_delete?: SpecUtilsStringified<boolean>
+  read?: SpecUtilsStringified<boolean>
+  write?: SpecUtilsStringified<boolean>
+  metadata?: SpecUtilsStringified<boolean>
+}
+
+export interface IndicesIndexSettingsKeys {
+  index?: IndicesIndexSettings
+  mode?: string
+  routing_path?: string | string[]
+  soft_deletes?: IndicesSoftDeletes
+  sort?: IndicesIndexSegmentSort
+  /** @remarks This property is not supported on Elastic Cloud Serverless. */
+  number_of_shards?: integer | string
+  /** @remarks This property is not supported on Elastic Cloud Serverless. */
+  number_of_replicas?: integer | string
+  number_of_routing_shards?: integer
+  check_on_startup?: IndicesIndexCheckOnStartup
+  codec?: string
+  routing_partition_size?: SpecUtilsStringified<integer>
+  load_fixed_bitset_filters_eagerly?: boolean
+  hidden?: boolean | string
+  auto_expand_replicas?: SpecUtilsWithNullValue<string>
+  merge?: IndicesMerge
+  search?: IndicesSettingsSearch
+  refresh_interval?: Duration
+  max_result_window?: integer
+  max_inner_result_window?: integer
+  max_rescore_window?: integer
+  max_docvalue_fields_search?: integer
+  max_script_fields?: integer
+  max_ngram_diff?: integer
+  max_shingle_diff?: integer
+  blocks?: IndicesIndexSettingBlocks
+  max_refresh_listeners?: integer
+  /** Settings to define analyzers, tokenizers, token filters and character filters.
+   * Refer to the linked documentation for step-by-step examples of updating analyzers on existing indices. */
+  analyze?: IndicesSettingsAnalyze
+  highlight?: IndicesSettingsHighlight
+  max_terms_count?: integer
+  max_regex_length?: integer
+  routing?: IndicesIndexRouting
+  gc_deletes?: Duration
+  default_pipeline?: PipelineName
+  final_pipeline?: PipelineName
+  lifecycle?: IndicesIndexSettingsLifecycle
+  provided_name?: Name
+  creation_date?: SpecUtilsStringified<EpochTime<UnitMillis>>
+  creation_date_string?: DateTime
+  uuid?: Uuid
+  version?: IndicesIndexVersioning
+  verified_before_close?: boolean | string
+  format?: string | integer
+  max_slices_per_scroll?: integer
+  translog?: IndicesTranslog
+  query_string?: IndicesSettingsQueryString
+  priority?: integer | string
+  top_metrics_max_size?: integer
+  analysis?: IndicesIndexSettingsAnalysis
+  settings?: IndicesIndexSettings
+  time_series?: IndicesIndexSettingsTimeSeries
+  queries?: IndicesQueries
+  /** Configure custom similarity settings to customize how search results are scored. */
+  similarity?: Record<string, IndicesSettingsSimilarity>
+  /** Enable or disable dynamic mapping for an index. */
+  mapping?: IndicesMappingLimitSettings
+  'indexing.slowlog'?: IndicesIndexingSlowlogSettings
+  /** Configure indexing back pressure limits. */
+  indexing_pressure?: IndicesIndexingPressure
+  /** The store module allows you to control how index data is stored and accessed on disk. */
+  store?: IndicesStorage
+}
+export type IndicesIndexSettings = IndicesIndexSettingsKeys
+& { [property: string]: any }
+
+export interface IndicesIndexSettingsAnalysis {
+  analyzer?: Record<string, AnalysisAnalyzer>
+  char_filter?: Record<string, AnalysisCharFilter>
+  filter?: Record<string, AnalysisTokenFilter>
+  normalizer?: Record<string, AnalysisNormalizer>
+  tokenizer?: Record<string, AnalysisTokenizer>
+}
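
`IndicesIndexSettings` is what you hand to index creation. A minimal sketch, reusing `client`; the index name and the analyzer definition are illustrative only.

await client.indices.create({
  index: 'articles',
  settings: {
    number_of_shards: 1,
    number_of_replicas: 1,
    refresh_interval: '30s',
    // Custom analysis chain, to be referenced from field mappings.
    analysis: {
      analyzer: {
        folded: { type: 'custom', tokenizer: 'standard', filter: ['lowercase', 'asciifolding'] }
      }
    }
  }
})
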
+
+export interface IndicesIndexSettingsLifecycle {
+  /** The name of the policy to use to manage the index. For information about how Elasticsearch applies policy changes, see Policy updates. */
+  name?: Name
+  /** Indicates whether or not the index has been rolled over. Automatically set to true when ILM completes the rollover action.
+   * You can explicitly set it to skip rollover. */
+  indexing_complete?: SpecUtilsStringified<boolean>
+  /** If specified, this is the timestamp used to calculate the index age for its phase transitions. Use this setting
+   * if you create a new index that contains old data and want to use the original creation date to calculate the index
+   * age. Specified as a Unix epoch value in milliseconds. */
+  origination_date?: long
+  /** Set to true to parse the origination date from the index name. This origination date is used to calculate the index age
+   * for its phase transitions. The index name must match the pattern ^.*-{date_format}-\\d+, where the date_format is
+   * yyyy.MM.dd and the trailing digits are optional. An index that was rolled over would normally match the full format,
+   * for example logs-2016.10.31-000002. If the index name doesn’t match the pattern, index creation fails. */
+  parse_origination_date?: boolean
+  step?: IndicesIndexSettingsLifecycleStep
+  /** The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action.
+   * When the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more
+   * information about rolling indices, see Rollover. */
+  rollover_alias?: string
+  /** Preference for the system that manages a data stream backing index (preferring ILM when both ILM and DLM are
+   * applicable for an index). */
+  prefer_ilm?: boolean | string
+}
+
+export interface IndicesIndexSettingsLifecycleStep {
+  /** Time to wait for the cluster to resolve allocation issues during an ILM shrink action. Must be greater than 1h (1 hour).
+   * See Shard allocation for shrink. */
+  wait_time_threshold?: Duration
+}
+
+export interface IndicesIndexSettingsTimeSeries {
+  end_time?: DateTime
+  start_time?: DateTime
+}
+
+export interface IndicesIndexState {
+  aliases?: Record<IndexName, IndicesAlias>
+  mappings?: MappingTypeMapping
+  settings?: IndicesIndexSettings
+  /** Default settings, included when the request's `include_default` is `true`. */
+  defaults?: IndicesIndexSettings
+  data_stream?: DataStreamName
+  /** Data stream lifecycle applicable if this is a data stream. */
+  lifecycle?: IndicesDataStreamLifecycle
+}
+
+export interface IndicesIndexTemplate {
+  /** Index patterns used to match the names of indices during creation. */
+  index_patterns: Names
+  /** An ordered list of component template names.
+   * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */
+  composed_of: Name[]
+  /** Template to be applied.
+   * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */
+  template?: IndicesIndexTemplateSummary
+  /** Version number used to manage index templates externally.
+   * This number is not automatically generated by Elasticsearch. */
+  version?: VersionNumber
+  /** Priority to determine index template precedence when a new data stream or index is created.
+   * The index template with the highest priority is chosen.
+   * If no priority is specified the template is treated as though it is of priority 0 (lowest priority).
+   * This number is not automatically generated by Elasticsearch. */
+  priority?: long
+  /** Optional user metadata about the index template. May have any contents.
+   * This map is not automatically generated by Elasticsearch. */
+  _meta?: Metadata
+  allow_auto_create?: boolean
+  /** If this object is included, the template is used to create data streams and their backing indices.
+   * Supports an empty object.
+   * Data streams require a matching index template with a `data_stream` object. */
+  data_stream?: IndicesIndexTemplateDataStreamConfiguration
+  /** Marks this index template as deprecated.
+   * When creating or updating a non-deprecated index template that uses deprecated components,
+   * Elasticsearch will emit a deprecation warning. */
+  deprecated?: boolean
+  /** A list of component template names that are allowed to be absent. */
+  ignore_missing_component_templates?: Names
+  /** Date and time when the index template was created. Only returned if the `human` query parameter is `true`. */
+  created_date?: DateTime
+  /** Date and time when the index template was created, in milliseconds since the epoch. */
+  created_date_millis?: EpochTime<UnitMillis>
+  /** Date and time when the index template was last modified. Only returned if the `human` query parameter is `true`. */
+  modified_date?: DateTime
+  /** Date and time when the index template was last modified, in milliseconds since the epoch. */
+  modified_date_millis?: EpochTime<UnitMillis>
+}
+
+export interface IndicesIndexTemplateDataStreamConfiguration {
+  /** If true, the data stream is hidden. */
+  hidden?: boolean
+  /** If true, the data stream supports custom routing. */
+  allow_custom_routing?: boolean
+}
+
+export interface IndicesIndexTemplateSummary {
+  /** Aliases to add.
+   * If the index template includes a `data_stream` object, these are data stream aliases.
+   * Otherwise, these are index aliases.
+   * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */
+  aliases?: Record<IndexName, IndicesAlias>
+  /** Mapping for fields in the index.
+   * If specified, this mapping can include field names, field data types, and mapping parameters. */
+  mappings?: MappingTypeMapping
+  /** Configuration options for the index. */
+  settings?: IndicesIndexSettings
+  lifecycle?: IndicesDataStreamLifecycleWithRollover
+  data_stream_options?: IndicesDataStreamOptionsTemplate | null
+}
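
A sketch tying `IndicesIndexTemplate` together: a composable template whose matching names create data streams, reusing `client`. The names, priority, and referenced ILM policy are placeholders.

await client.indices.putIndexTemplate({
  name: 'metrics-app',
  index_patterns: ['metrics-app-*'],
  data_stream: {},  // matching names create data streams rather than plain indices
  priority: 200,    // wins over lower-priority templates with overlapping patterns
  template: {
    settings: { 'index.lifecycle.name': 'logs-policy' },
    mappings: { properties: { '@timestamp': { type: 'date' } } }
  }
})
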
+
+export interface IndicesIndexVersioning {
+  created?: VersionString
+  created_string?: string
+}
+
+export interface IndicesIndexingPressure {
+  memory: IndicesIndexingPressureMemory
+}
+
+export interface IndicesIndexingPressureMemory {
+  /** Number of outstanding bytes that may be consumed by indexing requests. When this limit is reached or exceeded,
+   * the node will reject new coordinating and primary operations. When replica operations consume 1.5x this limit,
+   * the node will reject new replica operations. Defaults to 10% of the heap. */
+  limit?: integer
+}
+
+export interface IndicesIndexingSlowlogSettings {
+  level?: string
+  source?: integer
+  reformat?: boolean
+  threshold?: IndicesIndexingSlowlogTresholds
+}
+
+export interface IndicesIndexingSlowlogTresholds {
+  /** The indexing slow log, similar in functionality to the search slow log. The log file name ends with `_index_indexing_slowlog.json`.
+   * Log and the thresholds are configured in the same way as the search slowlog. */
+  index?: IndicesSlowlogTresholdLevels
+}
+
+export type IndicesIndicesBlockOptions = 'metadata' | 'read' | 'read_only' | 'write'
+
+export type IndicesManagedBy = 'Index Lifecycle Management' | 'Data stream lifecycle' | 'Unmanaged'
+
+export interface IndicesMappingLimitSettings {
+  coerce?: boolean
+  total_fields?: IndicesMappingLimitSettingsTotalFields
+  depth?: IndicesMappingLimitSettingsDepth
+  nested_fields?: IndicesMappingLimitSettingsNestedFields
+  nested_objects?: IndicesMappingLimitSettingsNestedObjects
+  field_name_length?: IndicesMappingLimitSettingsFieldNameLength
+  dimension_fields?: IndicesMappingLimitSettingsDimensionFields
+  source?: IndicesMappingLimitSettingsSourceFields
+  ignore_malformed?: boolean | string
+}
+
+export interface IndicesMappingLimitSettingsDepth {
+  /** The maximum depth for a field, which is measured as the number of inner objects. For instance, if all fields are defined
+   * at the root object level, then the depth is 1. If there is one object mapping, then the depth is 2, etc. */
+  limit?: long
+}
+
+export interface IndicesMappingLimitSettingsDimensionFields {
+  /** [preview] This functionality is in technical preview and may be changed or removed in a future release.
+   * Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. */
+  limit?: long
+}
+
+export interface IndicesMappingLimitSettingsFieldNameLength {
+  /** Setting for the maximum length of a field name. This setting isn’t really something that addresses mappings explosion but
+   * might still be useful if you want to limit the field length. It usually shouldn’t be necessary to set this setting. The
+   * default is okay unless a user starts to add a huge number of fields with really long names. Default is `Long.MAX_VALUE` (no limit). */
+  limit?: long
+}
+
+export interface IndicesMappingLimitSettingsNestedFields {
+  /** The maximum number of distinct nested mappings in an index. The nested type should only be used in special cases, when
+   * arrays of objects need to be queried independently of each other. To safeguard against poorly designed mappings, this
+   * setting limits the number of unique nested types per index. */
+  limit?: long
+}
+
+export interface IndicesMappingLimitSettingsNestedObjects {
+  /** The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps
+   * to prevent out of memory errors when a document contains too many nested objects. */
+  limit?: long
+}
+
+export interface IndicesMappingLimitSettingsSourceFields {
+  mode: IndicesSourceMode
+}
+
+export interface IndicesMappingLimitSettingsTotalFields {
+  /** The maximum number of fields in an index. Field and object mappings, as well as field aliases count towards this limit.
+   * The limit is in place to prevent mappings and searches from becoming too large. Higher values can lead to performance
+   * degradations and memory issues, especially in clusters with a high load or few resources. */
+  limit?: long | string
+  /** This setting determines what happens when a dynamically mapped field would exceed the total fields limit. When set
+   * to false (the default), the index request of the document that tries to add a dynamic field to the mapping will fail
+   * with the message Limit of total fields [X] has been exceeded. When set to true, the index request will not fail.
+   * Instead, fields that would exceed the limit are not added to the mapping, similar to dynamic: false.
+   * The fields that were not added to the mapping will be added to the _ignored field. */
+  ignore_dynamic_beyond_limit?: boolean | string
+}
+
+export interface IndicesMerge {
+  scheduler?: IndicesMergeScheduler
+}
+
+export interface IndicesMergeScheduler {
+  max_thread_count?: SpecUtilsStringified<integer>
+  max_merge_count?: SpecUtilsStringified<integer>
+}
+
+export interface IndicesNumericFielddata {
+  format: IndicesNumericFielddataFormat
+}
+
+export type IndicesNumericFielddataFormat = 'array' | 'disabled'
+
+export interface IndicesQueries {
+  cache?: IndicesCacheQueries
+}
+
+export interface IndicesRetentionLease {
+  period: Duration
+}
+
+export interface IndicesSamplingConfiguration {
+  /** The fraction of documents to sample between 0 and 1. */
+  rate: double
+  /** The maximum number of documents to sample. */
+  max_samples: integer
+  /** The maximum total size of sampled documents. */
+  max_size?: ByteSize
+  /** The maximum total size of sampled documents in bytes. */
+  max_size_in_bytes: long
+  /** The duration for which the sampled documents should be retained. */
+  time_to_live?: Duration
+  /** The duration for which the sampled documents should be retained, in milliseconds. */
+  time_to_live_in_millis: long
+  /** An optional condition script that sampled documents must satisfy. */
+  if?: string
+  /** The time when the sampling configuration was created. */
+  creation_time?: DateTime
+  /** The time when the sampling configuration was created, in milliseconds since epoch. */
+  creation_time_in_millis: long
+}
+
+export type IndicesSamplingMethod = 'aggregate' | 'last_value'
+
+export interface IndicesSearchIdle {
+  after?: Duration
+}
+
+export type IndicesSegmentSortMissing = '_last' | '_first'
+
+export type IndicesSegmentSortMode = 'min' | 'MIN' | 'max' | 'MAX'
+
+export type IndicesSegmentSortOrder = 'asc' | 'ASC' | 'desc' | 'DESC'
+
+export interface IndicesSettingsAnalyze {
+  max_token_count?: SpecUtilsStringified<integer>
+}
+
+export interface IndicesSettingsHighlight {
+  max_analyzed_offset?: integer
+}
+
+export interface IndicesSettingsQueryString {
+  lenient: SpecUtilsStringified<boolean>
+}
+
+export interface IndicesSettingsSearch {
+  idle?: IndicesSearchIdle
+  slowlog?: IndicesSlowlogSettings
+}
+
+export type IndicesSettingsSimilarity = IndicesSettingsSimilarityBm25 | IndicesSettingsSimilarityBoolean | IndicesSettingsSimilarityDfi | IndicesSettingsSimilarityDfr | IndicesSettingsSimilarityIb | IndicesSettingsSimilarityLmd | IndicesSettingsSimilarityLmj | IndicesSettingsSimilarityScripted
+
+export interface IndicesSettingsSimilarityBm25 {
+  type: 'BM25'
+  b?: double
+  discount_overlaps?: boolean
+  k1?: double
+}
+
+export interface IndicesSettingsSimilarityBoolean {
+  type: 'boolean'
+}
+
+export interface IndicesSettingsSimilarityDfi {
+  type: 'DFI'
+  independence_measure: DFIIndependenceMeasure
+}
+
+export interface IndicesSettingsSimilarityDfr {
+  type: 'DFR'
+  after_effect: DFRAfterEffect
+  basic_model: DFRBasicModel
+  normalization: Normalization
+}
+
+export interface IndicesSettingsSimilarityIb {
+  type: 'IB'
+  distribution: IBDistribution
+  lambda: IBLambda
+  normalization: Normalization
+}
+
+export interface IndicesSettingsSimilarityLmd {
+  type: 'LMDirichlet'
+  mu?: double
+}
+
+export interface IndicesSettingsSimilarityLmj {
+  type: 'LMJelinekMercer'
+  lambda?: double
+}
+
+export interface IndicesSettingsSimilarityScripted {
+  type: 'scripted'
+  script: Script | ScriptSource
+  weight_script?: Script | ScriptSource
+}
+
+export interface IndicesSlowlogSettings {
+  level?: string
+  source?: integer
+  reformat?: boolean
+  threshold?: IndicesSlowlogTresholds
+}
+
+export interface IndicesSlowlogTresholdLevels {
+  warn?: Duration
+  info?: Duration
+  debug?: Duration
+  trace?: Duration
+}
+
+export interface IndicesSlowlogTresholds {
+  query?: IndicesSlowlogTresholdLevels
+  fetch?: IndicesSlowlogTresholdLevels
+}
+
+export interface IndicesSoftDeletes {
+  /** Indicates whether soft deletes are enabled on the index. */
+  enabled?: boolean
+  /** The maximum period to retain a shard history retention lease before it is considered expired.
+   * Shard history retention leases ensure that soft deletes are retained during merges on the Lucene
+   * index. If a soft delete is merged away before it can be replicated to a follower the following
+   * process will fail due to incomplete history on the leader. */
+  retention_lease?: IndicesRetentionLease
+}
+
+export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic'
+
+export interface IndicesStorage {
+  type: IndicesStorageType
+  /** You can restrict the use of the mmapfs and the related hybridfs store type via the setting node.store.allow_mmap.
+   * This is a boolean setting indicating whether or not memory-mapping is allowed. The default is to allow it. This
+   * setting is useful, for example, if you are in an environment where you can not control the ability to create a lot
+   * of memory maps so you need to disable the ability to use memory-mapping. */
+  allow_mmap?: boolean
+  /** How often store statistics are refreshed. */
+  stats_refresh_interval?: Duration
+}
+
+export type IndicesStorageType = 'fs' | 'niofs' | 'mmapfs' | 'hybridfs' | string
+
+export interface IndicesTemplateMapping {
+  aliases: Record<IndexName, IndicesAlias>
+  index_patterns: Name[]
+  mappings: MappingTypeMapping
+  order: integer
+  settings: Record<string, any>
+  version?: VersionNumber
+}
+
+export interface IndicesTranslog {
+  /** How often the translog is fsynced to disk and committed, regardless of write operations.
+   * Values less than 100ms are not allowed. */
+  sync_interval?: Duration
+  /** Whether or not to `fsync` and commit the translog after every index, delete, update, or bulk request. */
+  durability?: IndicesTranslogDurability
+  /** The translog stores all operations that are not yet safely persisted in Lucene (i.e., are not
+   * part of a Lucene commit point). Although these operations are available for reads, they will need
+   * to be replayed if the shard was stopped and had to be recovered. This setting controls the
+   * maximum total size of these operations, to prevent recoveries from taking too long. Once the
+   * maximum size has been reached a flush will happen, generating a new Lucene commit point. */
+  flush_threshold_size?: ByteSize
+  retention?: IndicesTranslogRetention
+}
+
+export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC'
+
+export interface IndicesTranslogRetention {
+  /** This controls the total size of translog files to keep for each shard. Keeping more translog files increases
+   * the chance of performing an operation based sync when recovering a replica. If the translog files are not
+   * sufficient, replica recovery will fall back to a file based sync. This setting is ignored, and should not be
+   * set, if soft deletes are enabled. Soft deletes are enabled by default in indices created in Elasticsearch
+   * versions 7.0.0 and later. */
+  size?: ByteSize
+  /** This controls the maximum duration for which translog files are kept by each shard. Keeping more
+   * translog files increases the chance of performing an operation based sync when recovering replicas. If
+   * the translog files are not sufficient, replica recovery will fall back to a file based sync. This setting
+   * is ignored, and should not be set, if soft deletes are enabled. Soft deletes are enabled by default in
+   * indices created in Elasticsearch versions 7.0.0 and later. */
+  age?: Duration
+}
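
Translog durability is a dynamic per-index setting, so it can be changed on a live index through the update-settings API. A sketch, reusing `client`, with a placeholder index name; switching to `async` deliberately trades a small durability window for indexing throughput.

await client.indices.putSettings({
  index: 'articles',
  settings: { translog: { durability: 'async' } }
})
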
+
+export interface IndicesAddBlockAddIndicesBlockStatus {
+  name: IndexName
+  blocked: boolean
+}
+
+export interface IndicesAddBlockRequest extends RequestBase {
+  /** A comma-separated list or wildcard expression of index names used to limit the request.
+   * By default, you must explicitly name the indices you are adding blocks to.
+   * To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+   * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */
+  index: Indices
+  /** The block type to add to the index. */
+  block: IndicesIndicesBlockOptions
+  /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+   * This behavior applies even if the request targets other open indices.
+   * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */
+  allow_no_indices?: boolean
+  /** The type of index that wildcard patterns can match.
+   * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+   * It supports comma-separated values, such as `open,hidden`. */
+  expand_wildcards?: ExpandWildcards
+  /** If `false`, the request returns an error if it targets a missing or closed index. */
+  ignore_unavailable?: boolean
+  /** The period to wait for the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
+  master_timeout?: Duration
+  /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+   * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
+  timeout?: Duration
+  /** All values in `body` will be added to the request body. */
+  body?: string | { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never }
+  /** All values in `querystring` will be added to the request querystring. */
+  querystring?: { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never }
+}
+
+export interface IndicesAddBlockResponse {
+  acknowledged: boolean
+  shards_acknowledged: boolean
+  indices: IndicesAddBlockAddIndicesBlockStatus[]
+}
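
A sketch of adding an index block, reusing `client`; a `write` block is the usual prerequisite for shrink, clone, and similar operations on the (placeholder) index.

const blocked = await client.indices.addBlock({ index: 'articles', block: 'write' })
console.log(blocked.acknowledged, blocked.indices.map(i => i.name))
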
*/ + querystring?: { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } +} + +export interface IndicesAddBlockResponse { + acknowledged: boolean + shards_acknowledged: boolean + indices: IndicesAddBlockAddIndicesBlockStatus[] +} + +export interface IndicesAnalyzeAnalyzeDetail { + analyzer?: IndicesAnalyzeAnalyzerDetail + charfilters?: IndicesAnalyzeCharFilterDetail[] + custom_analyzer: boolean + tokenfilters?: IndicesAnalyzeTokenDetail[] + tokenizer?: IndicesAnalyzeTokenDetail +} + +export interface IndicesAnalyzeAnalyzeToken { + end_offset: long + position: long + positionLength?: long + start_offset: long + token: string + type: string +} + +export interface IndicesAnalyzeAnalyzerDetail { + name: string + tokens: IndicesAnalyzeExplainAnalyzeToken[] +} + +export interface IndicesAnalyzeCharFilterDetail { + filtered_text: string[] + name: string +} + +export interface IndicesAnalyzeExplainAnalyzeTokenKeys { + bytes: string + end_offset: long + keyword?: boolean + position: long + positionLength: long + start_offset: long + termFrequency: long + token: string + type: string +} +export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeTokenKeys +& { [property: string]: any } + +export interface IndicesAnalyzeRequest extends RequestBase { + /** Index used to derive the analyzer. + * If specified, the `analyzer` or field parameter overrides this value. + * If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. */ + index?: IndexName + /** The name of the analyzer that should be applied to the provided `text`. + * This could be a built-in analyzer, or an analyzer that’s been configured in the index. */ + analyzer?: string + /** Array of token attributes used to filter the output of the `explain` parameter. */ + attributes?: string[] + /** Array of character filters used to preprocess characters before the tokenizer. */ + char_filter?: AnalysisCharFilter[] + /** If `true`, the response includes token attributes and additional details. */ + explain?: boolean + /** Field used to derive the analyzer. + * To use this parameter, you must specify an index. + * If specified, the `analyzer` parameter overrides this value. */ + field?: Field + /** Array of token filters used to apply after the tokenizer. */ + filter?: AnalysisTokenFilter[] + /** Normalizer to use to convert text into a single token. */ + normalizer?: string + /** Text to analyze. + * If an array of strings is provided, it is analyzed as a multi-value field. */ + text?: IndicesAnalyzeTextToAnalyze + /** Tokenizer to use to convert text into tokens. */ + tokenizer?: AnalysisTokenizer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, analyzer?: never, attributes?: never, char_filter?: never, explain?: never, field?: never, filter?: never, normalizer?: never, text?: never, tokenizer?: never } + /** All values in `querystring` will be added to the request querystring. 
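+ *
+ * @example
+ * A minimal usage sketch (hypothetical text; assumes a connected elasticsearch-js `Client` instance named `client`):
+ * ```ts
+ * // analyze text with the built-in standard analyzer
+ * const resp = await client.indices.analyze({ analyzer: 'standard', text: 'Quick Brown Foxes!' })
+ * console.log(resp.tokens)
+ * ```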
*/ + querystring?: { [key: string]: any } & { index?: never, analyzer?: never, attributes?: never, char_filter?: never, explain?: never, field?: never, filter?: never, normalizer?: never, text?: never, tokenizer?: never } +} + +export interface IndicesAnalyzeResponse { + detail?: IndicesAnalyzeAnalyzeDetail + tokens?: IndicesAnalyzeAnalyzeToken[] +} + +export type IndicesAnalyzeTextToAnalyze = string | string[] + +export interface IndicesAnalyzeTokenDetail { + name: string + tokens: IndicesAnalyzeExplainAnalyzeToken[] +} + +export interface IndicesCancelMigrateReindexRequest extends RequestBase { + /** The index or data stream name */ + index: Indices + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } +} + +export type IndicesCancelMigrateReindexResponse = AcknowledgedResponseBase + +export interface IndicesClearCacheRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, clears the fields cache. + * Use the `fields` parameter to clear the cache of specific fields only. */ + fielddata?: boolean + /** Comma-separated list of field names used to limit the `fielddata` parameter. */ + fields?: Fields + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, clears the query cache. */ + query?: boolean + /** If `true`, clears the request cache. */ + request?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, fielddata?: never, fields?: never, ignore_unavailable?: never, query?: never, request?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, fielddata?: never, fields?: never, ignore_unavailable?: never, query?: never, request?: never } +} + +export type IndicesClearCacheResponse = ShardsOperationResponseBase + +export interface IndicesCloneRequest extends RequestBase { + /** Name of the source index to clone. */ + index: IndexName + /** Name of the target index to create. */ + target: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. 
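+ *
+ * @example
+ * A minimal usage sketch (hypothetical names; assumes a connected `Client` named `client` and that the source index was made read-only first, for example with the add index block API):
+ * ```ts
+ * const resp = await client.indices.clone({ index: 'my-index', target: 'my-index-clone' })
+ * console.log(resp.acknowledged)
+ * ```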
*/ + timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + wait_for_active_shards?: WaitForActiveShards + /** Aliases for the resulting index. */ + aliases?: Record + /** Configuration options for the target index. */ + settings?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } +} + +export interface IndicesCloneResponse { + acknowledged: boolean + index: IndexName + shards_acknowledged: boolean +} + +export interface IndicesCloseCloseIndexResult { + closed: boolean + shards?: Record +} + +export interface IndicesCloseCloseShardResult { + failures: ShardFailure[] +} + +export interface IndicesCloseRequest extends RequestBase { + /** Comma-separated list or wildcard expression of index names used to limit the request. */ + index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + wait_for_active_shards?: WaitForActiveShards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } +} + +export interface IndicesCloseResponse { + acknowledged: boolean + indices: Record + shards_acknowledged: boolean +} + +export interface IndicesCreateRequest extends RequestBase { + /** Name of the index you wish to create. 
+ * Index names must meet the following criteria:
+ *
+ * * Lowercase only
+ * * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#`
+ * * Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions
+ * * Cannot start with `-`, `_`, or `+`
+ * * Cannot be `.` or `..`
+ * * Cannot be longer than 255 bytes (note that it is bytes, so multi-byte characters will reach the limit faster)
+ * * Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins */
+ index: IndexName
+ /** Period to wait for a connection to the master node.
+ * If no response is received before the timeout expires, the request fails and returns an error. */
+ master_timeout?: Duration
+ /** Period to wait for a response.
+ * If no response is received before the timeout expires, the request fails and returns an error. */
+ timeout?: Duration
+ /** The number of shard copies that must be active before proceeding with the operation.
+ * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */
+ wait_for_active_shards?: WaitForActiveShards
+ /** Aliases for the index. */
+ aliases?: Record<Name, IndicesAlias>
+ /** Mapping for fields in the index. If specified, this mapping can include:
+ * - Field names
+ * - Field data types
+ * - Mapping parameters */
+ mappings?: MappingTypeMapping
+ /** Configuration options for the index. */
+ settings?: IndicesIndexSettings
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, mappings?: never, settings?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, mappings?: never, settings?: never }
+}
+
+export interface IndicesCreateResponse {
+ index: IndexName
+ shards_acknowledged: boolean
+ acknowledged: boolean
+}
+
+export interface IndicesCreateDataStreamRequest extends RequestBase {
+ /** Name of the data stream, which must meet the following criteria:
+ * Lowercase only;
+ * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character;
+ * Cannot start with `-`, `_`, `+`, or `.ds-`;
+ * Cannot be `.` or `..`;
+ * Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. */
+ name: DataStreamName
+ /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+ master_timeout?: Duration
+ /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+ timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never }
+ /** All values in `querystring` will be added to the request querystring.
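+ *
+ * @example
+ * A minimal usage sketch (hypothetical stream name; assumes a connected `Client` named `client` and an existing matching index template with `data_stream` enabled):
+ * ```ts
+ * await client.indices.createDataStream({ name: 'logs-myapp-default' })
+ * ```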
*/ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } +} + +export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase + +export interface IndicesCreateFromCreateFrom { + /** Mappings overrides to be applied to the destination index (optional) */ + mappings_override?: MappingTypeMapping + /** Settings overrides to be applied to the destination index (optional) */ + settings_override?: IndicesIndexSettings + /** If index blocks should be removed when creating destination index (optional) */ + remove_index_blocks?: boolean +} + +export interface IndicesCreateFromRequest extends RequestBase { + /** The source index or data stream name */ + source: IndexName + /** The destination index or data stream name */ + dest: IndexName + create_from?: IndicesCreateFromCreateFrom + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { source?: never, dest?: never, create_from?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { source?: never, dest?: never, create_from?: never } +} + +export interface IndicesCreateFromResponse { + acknowledged: boolean + index: IndexName + shards_acknowledged: boolean +} + +export interface IndicesDataStreamsStatsDataStreamsStatsItem { + /** Current number of backing indices for the data stream. */ + backing_indices: integer + /** Name of the data stream. */ + data_stream: Name + /** The data stream’s highest `@timestamp` value, converted to milliseconds since the Unix epoch. + * NOTE: This timestamp is provided as a best effort. + * The data stream may contain `@timestamp` values higher than this if one or more of the following conditions are met: + * The stream contains closed backing indices; + * Backing indices with a lower generation contain higher `@timestamp` values. */ + maximum_timestamp: EpochTime + /** Total size of all shards for the data stream’s backing indices. + * This parameter is only returned if the `human` query parameter is `true`. */ + store_size?: ByteSize + /** Total size, in bytes, of all shards for the data stream’s backing indices. */ + store_size_bytes: long +} + +export interface IndicesDataStreamsStatsRequest extends RequestBase { + /** Comma-separated list of data streams used to limit the request. + * Wildcard expressions (`*`) are supported. + * To target all data streams in a cluster, omit this parameter or use `*`. */ + name?: Indices + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never } +} + +export interface IndicesDataStreamsStatsResponse { + /** Contains information about shards that attempted to execute the request. */ + _shards: ShardStatistics + /** Total number of backing indices for the selected data streams. */ + backing_indices: integer + /** Total number of selected data streams. */ + data_stream_count: integer + /** Contains statistics for the selected data streams. */ + data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] + /** Total size of all shards for the selected data streams. 
+ * This property is included only if the `human` query parameter is `true` */ + total_store_sizes?: ByteSize + /** Total size, in bytes, of all shards for the selected data streams. */ + total_store_size_bytes: long +} + +export interface IndicesDeleteRequest extends RequestBase { + /** Comma-separated list of indices to delete. + * You cannot specify index aliases. + * By default, this parameter does not support wildcards (`*`) or `_all`. + * To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. */ + index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } +} + +export type IndicesDeleteResponse = IndicesResponseBase + +export interface IndicesDeleteAliasIndicesAliasesResponseBody extends AcknowledgedResponseBase { + errors?: boolean +} + +export interface IndicesDeleteAliasRequest extends RequestBase { + /** Comma-separated list of data streams or indices used to limit the request. + * Supports wildcards (`*`). */ + index: Indices + /** Comma-separated list of aliases to remove. + * Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. */ + name: Names + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
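+ *
+ * @example
+ * A minimal usage sketch (hypothetical names; assumes a connected `Client` named `client`):
+ * ```ts
+ * // remove the alias `my-alias` from `my-index`
+ * const resp = await client.indices.deleteAlias({ index: 'my-index', name: 'my-alias' })
+ * console.log(resp.acknowledged)
+ * ```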
*/
+ querystring?: { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never }
+}
+
+export type IndicesDeleteAliasResponse = IndicesDeleteAliasIndicesAliasesResponseBody
+
+export interface IndicesDeleteDataLifecycleRequest extends RequestBase {
+ /** A comma-separated list of data streams for which the data stream lifecycle will be deleted; use `*` to target all data streams */
+ name: DataStreamNames
+ /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */
+ expand_wildcards?: ExpandWildcards
+ /** Specify timeout for connection to master */
+ master_timeout?: Duration
+ /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+ timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never }
+}
+
+export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase
+
+export interface IndicesDeleteDataStreamRequest extends RequestBase {
+ /** Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. */
+ name: DataStreamNames
+ /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+ master_timeout?: Duration
+ /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. */
+ expand_wildcards?: ExpandWildcards
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, expand_wildcards?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, expand_wildcards?: never }
+}
+
+export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase
+
+export interface IndicesDeleteDataStreamOptionsRequest extends RequestBase {
+ /** A comma-separated list of data streams for which the data stream options will be deleted; use `*` to target all data streams */
+ name: DataStreamNames
+ /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */
+ expand_wildcards?: ExpandWildcards
+ /** Specify timeout for connection to master */
+ master_timeout?: Duration
+ /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+ timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never }
+}
+
+export type IndicesDeleteDataStreamOptionsResponse = AcknowledgedResponseBase
+
+export interface IndicesDeleteIndexTemplateRequest extends RequestBase {
+ /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */
+ name: Names
+ /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
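+ *
+ * @example
+ * A minimal usage sketch (hypothetical template name; assumes a connected `Client` named `client`):
+ * ```ts
+ * await client.indices.deleteIndexTemplate({ name: 'my-index-template' })
+ * ```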
*/ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } +} + +export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase + +export interface IndicesDeleteSampleConfigurationRequest extends RequestBase { + /** The name of the index. */ + index: IndexName + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never } +} + +export type IndicesDeleteSampleConfigurationResponse = AcknowledgedResponseBase + +export interface IndicesDeleteTemplateRequest extends RequestBase { + /** The name of the legacy index template to delete. + * Wildcard (`*`) expressions are supported. */ + name: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } +} + +export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase + +export interface IndicesDiskUsageRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. */ + index: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, the API performs a flush before analysis. 
+ * If `false`, the response may not include uncommitted data. */ + flush?: boolean + /** If `true`, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + /** Analyzing field disk usage is resource-intensive. + * To use the API, this parameter must be set to `true`. */ + run_expensive_tasks?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, run_expensive_tasks?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, run_expensive_tasks?: never } +} + +export type IndicesDiskUsageResponse = any + +export interface IndicesDownsampleRequest extends RequestBase { + /** Name of the time series index to downsample. */ + index: IndexName + /** Name of the index to create. */ + target_index: IndexName + config?: IndicesDownsampleConfig + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, target_index?: never, config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, target_index?: never, config?: never } +} + +export type IndicesDownsampleResponse = any + +export interface IndicesExistsRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). */ + index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, returns settings in flat format. */ + flat_settings?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, return all default settings in the response. */ + include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. */ + local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } +} + +export type IndicesExistsResponse = boolean + +export interface IndicesExistsAliasRequest extends RequestBase { + /** Comma-separated list of aliases to check. Supports wildcards (`*`). */ + name: Names + /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). 
+ * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. */ + ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } +} + +export type IndicesExistsAliasResponse = boolean + +export interface IndicesExistsIndexTemplateRequest extends RequestBase { + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ + name: Name + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ + local?: boolean + /** If true, returns settings in flat format. */ + flat_settings?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never } +} + +export type IndicesExistsIndexTemplateResponse = boolean + +export interface IndicesExistsTemplateRequest extends RequestBase { + /** A comma-separated list of index template names used to limit the request. + * Wildcard (`*`) expressions are supported. */ + name: Names + /** Indicates whether to use a flat format for the response. */ + flat_settings?: boolean + /** Indicates whether to get information from the local node only. */ + local?: boolean + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
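+ *
+ * @example
+ * A minimal usage sketch (hypothetical name; assumes a connected `Client` named `client`):
+ * ```ts
+ * // resolves to a plain boolean
+ * const exists = await client.indices.existsTemplate({ name: 'my-legacy-template' })
+ * ```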
*/ + querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } +} + +export type IndicesExistsTemplateResponse = boolean + +export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { + index: IndexName + managed_by_lifecycle: boolean + index_creation_date_millis?: EpochTime + time_since_index_creation?: Duration + rollover_date_millis?: EpochTime + time_since_rollover?: Duration + lifecycle?: IndicesDataStreamLifecycleWithRollover + generation_time?: Duration + error?: string +} + +export interface IndicesExplainDataLifecycleRequest extends RequestBase { + /** The name of the index to explain */ + index: Indices + /** indicates if the API should return the default values the system uses for the index's lifecycle */ + include_defaults?: boolean + /** Specify timeout for connection to master */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, include_defaults?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, include_defaults?: never, master_timeout?: never } +} + +export interface IndicesExplainDataLifecycleResponse { + indices: Record +} + +export interface IndicesFieldUsageStatsFieldSummary { + any: uint + stored_fields: uint + doc_values: uint + points: uint + norms: uint + term_vectors: uint + knn_vectors: uint + inverted_index: IndicesFieldUsageStatsInvertedIndex +} + +export interface IndicesFieldUsageStatsFieldsUsageBodyKeys { + _shards: ShardStatistics +} +export type IndicesFieldUsageStatsFieldsUsageBody = IndicesFieldUsageStatsFieldsUsageBodyKeys +& { [property: string]: IndicesFieldUsageStatsUsageStatsIndex | ShardStatistics } + +export interface IndicesFieldUsageStatsInvertedIndex { + terms: uint + postings: uint + proximity: uint + positions: uint + term_frequencies: uint + offsets: uint + payloads: uint +} + +export interface IndicesFieldUsageStatsRequest extends RequestBase { + /** Comma-separated list or wildcard expression of index names used to limit the request. */ + index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ + fields?: Fields + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never } + /** All values in `querystring` will be added to the request querystring. 
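+ *
+ * @example
+ * A minimal usage sketch (hypothetical index and field names; assumes a connected `Client` named `client`):
+ * ```ts
+ * const resp = await client.indices.fieldUsageStats({ index: 'my-index', fields: 'title' })
+ * console.log(resp._shards)
+ * ```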
*/ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never } +} + +export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody + +export interface IndicesFieldUsageStatsShardsStats { + all_fields: IndicesFieldUsageStatsFieldSummary + fields: Record +} + +export interface IndicesFieldUsageStatsUsageStatsIndex { + shards: IndicesFieldUsageStatsUsageStatsShards[] +} + +export interface IndicesFieldUsageStatsUsageStatsShards { + routing: IndicesStatsShardRouting + stats: IndicesFieldUsageStatsShardsStats + tracking_id: string + tracking_started_at_millis: EpochTime +} + +export interface IndicesFlushRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases to flush. + * Supports wildcards (`*`). + * To flush all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, the request forces a flush even if there are no changes to commit to the index. */ + force?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, the flush operation blocks until execution when another flush operation is running. + * If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. */ + wait_if_ongoing?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, force?: never, ignore_unavailable?: never, wait_if_ongoing?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, force?: never, ignore_unavailable?: never, wait_if_ongoing?: never } +} + +export type IndicesFlushResponse = ShardsOperationResponseBase + +export interface IndicesForcemergeRequest extends RequestBase { + /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ + index?: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. 
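+ *
+ * @example
+ * A minimal usage sketch (hypothetical index name; assumes a connected `Client` named `client`):
+ * ```ts
+ * // merge each shard down to a single segment
+ * await client.indices.forcemerge({ index: 'my-index', max_num_segments: 1 })
+ * ```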
*/ + expand_wildcards?: ExpandWildcards + /** Specify whether the index should be flushed after performing the operation (default: true) */ + flush?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ + ignore_unavailable?: boolean + /** The number of segments the index should be merged into (default: dynamic) */ + max_num_segments?: long + /** Specify whether the operation should only expunge deleted documents */ + only_expunge_deletes?: boolean + /** Should the request wait until the force merge is completed. */ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, max_num_segments?: never, only_expunge_deletes?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, max_num_segments?: never, only_expunge_deletes?: never, wait_for_completion?: never } +} + +export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody + +export interface IndicesForcemergeForceMergeResponseBody extends ShardsOperationResponseBase { + /** task contains a task id returned when wait_for_completion=false, + * you can use the task_id to get the status of the task at _tasks/ */ + task?: string +} + +export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings' + +export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[] + +export interface IndicesGetRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and index aliases used to limit the request. + * Wildcard expressions (*) are supported. */ + index: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only + * missing or closed indices. This behavior applies even if the request targets other open indices. For example, + * a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ + allow_no_indices?: boolean + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as open,hidden. */ + expand_wildcards?: ExpandWildcards + /** If true, returns settings in flat format. */ + flat_settings?: boolean + /** If false, requests that target a missing index return an error. */ + ignore_unavailable?: boolean + /** If true, return all default settings in the response. */ + include_defaults?: boolean + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ + local?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Return only information on specified index features */ + features?: IndicesGetFeatures + /** All values in `body` will be added to the request body. 
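+ *
+ * @example
+ * A minimal usage sketch (hypothetical index name; assumes a connected `Client` named `client`):
+ * ```ts
+ * const resp = await client.indices.get({ index: 'my-index', features: ['mappings', 'settings'] })
+ * console.log(resp['my-index'])
+ * ```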
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never, features?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never, features?: never } +} + +export type IndicesGetResponse = Record + +export interface IndicesGetAliasRequest extends RequestBase { + /** Comma-separated list of aliases to retrieve. + * Supports wildcards (`*`). + * To retrieve all aliases, omit this parameter or use `*` or `_all`. */ + name?: Names + /** Comma-separated list of data streams or indices used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } +} + +export type IndicesGetAliasResponse = Record + +export interface IndicesGetAliasIndexAliases { + aliases: Record +} + +export interface IndicesGetAliasNotFoundAliasesKeys { + error: string + status: integer +} +export type IndicesGetAliasNotFoundAliases = IndicesGetAliasNotFoundAliasesKeys +& { [property: string]: IndicesGetAliasIndexAliases | string | integer } + +export interface IndicesGetAllSampleConfigurationRequest extends RequestBase { + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
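+ *
+ * @example
+ * A sketch of a request literal for this endpoint (placeholder values):
+ * ```ts
+ * const req: IndicesGetAllSampleConfigurationRequest = { master_timeout: '30s' }
+ * ```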
*/
+ querystring?: { [key: string]: any } & { master_timeout?: never }
+}
+
+export interface IndicesGetAllSampleConfigurationResponse {
+ configurations: IndicesGetAllSampleConfigurationIndexSamplingConfiguration[]
+}
+
+export interface IndicesGetAllSampleConfigurationIndexSamplingConfiguration {
+ index: IndexName
+ configuration: IndicesSamplingConfiguration
+}
+
+export interface IndicesGetDataLifecycleDataStreamWithLifecycle {
+ name: DataStreamName
+ lifecycle?: IndicesDataStreamLifecycleWithRollover
+}
+
+export interface IndicesGetDataLifecycleRequest extends RequestBase {
+ /** Comma-separated list of data streams to limit the request.
+ * Supports wildcards (`*`).
+ * To target all data streams, omit this parameter or use `*` or `_all`. */
+ name: DataStreamNames
+ /** Type of data stream that wildcard patterns can match.
+ * Supports comma-separated values, such as `open,hidden`. */
+ expand_wildcards?: ExpandWildcards
+ /** If `true`, return all default settings in the response. */
+ include_defaults?: boolean
+ /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+ master_timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never }
+}
+
+export interface IndicesGetDataLifecycleResponse {
+ data_streams: IndicesGetDataLifecycleDataStreamWithLifecycle[]
+}
+
+export interface IndicesGetDataLifecycleStatsDataStreamStats {
+ /** The count of the backing indices for the data stream that have encountered an error. */
+ backing_indices_in_error: integer
+ /** The count of the backing indices for the data stream. */
+ backing_indices_in_total: integer
+ /** The name of the data stream. */
+ name: DataStreamName
+}
+
+export interface IndicesGetDataLifecycleStatsRequest extends RequestBase {
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any }
+}
+
+export interface IndicesGetDataLifecycleStatsResponse {
+ /** The count of data streams currently being managed by the data stream lifecycle. */
+ data_stream_count: integer
+ /** Information about the data streams that are managed by the data stream lifecycle. */
+ data_streams: IndicesGetDataLifecycleStatsDataStreamStats[]
+ /** The duration of the last data stream lifecycle execution. */
+ last_run_duration_in_millis?: DurationValue<UnitMillis>
+ /** The time that passed between the start of the last two data stream lifecycle executions.
+ * This value should be approximately equal to `data_streams.lifecycle.poll_interval`. */
+ time_between_starts_in_millis?: DurationValue<UnitMillis>
+}
+
+export interface IndicesGetDataStreamRequest extends RequestBase {
+ /** Comma-separated list of data stream names used to limit the request.
+ * Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. */
+ name?: DataStreamNames
+ /** Type of data stream that wildcard patterns can match.
+ * Supports comma-separated values, such as `open,hidden`.
*/
+ expand_wildcards?: ExpandWildcards
+ /** If true, returns all relevant default configurations for the index template. */
+ include_defaults?: boolean
+ /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+ master_timeout?: Duration
+ /** Whether the maximum timestamp for each data stream should be calculated and returned. */
+ verbose?: boolean
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never, verbose?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, include_defaults?: never, master_timeout?: never, verbose?: never }
+}
+
+export interface IndicesGetDataStreamResponse {
+ data_streams: IndicesDataStream[]
+}
+
+export interface IndicesGetDataStreamMappingsDataStreamMappings {
+ /** The name of the data stream. */
+ name: string
+ /** The mappings specific to this data stream */
+ mappings: MappingTypeMapping
+ /** The mappings specific to this data stream merged with the mappings from its template. These `effective_mappings`
+ * are the mappings that will be used when a new index is created for this data stream. */
+ effective_mappings: MappingTypeMapping
+}
+
+export interface IndicesGetDataStreamMappingsRequest extends RequestBase {
+ /** A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`). */
+ name: Indices
+ /** The period to wait for a connection to the master node. If no response is
+ * received before the timeout expires, the request fails and returns an
+ * error. */
+ master_timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { name?: never, master_timeout?: never }
+}
+
+export interface IndicesGetDataStreamMappingsResponse {
+ data_streams: IndicesGetDataStreamMappingsDataStreamMappings[]
+}
+
+export interface IndicesGetDataStreamOptionsDataStreamWithOptions {
+ name: DataStreamName
+ options?: IndicesDataStreamOptions
+}
+
+export interface IndicesGetDataStreamOptionsRequest extends RequestBase {
+ /** Comma-separated list of data streams to limit the request.
+ * Supports wildcards (`*`).
+ * To target all data streams, omit this parameter or use `*` or `_all`. */
+ name: DataStreamNames
+ /** Type of data stream that wildcard patterns can match.
+ * Supports comma-separated values, such as `open,hidden`. */
+ expand_wildcards?: ExpandWildcards
+ /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */
+ master_timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never }
+ /** All values in `querystring` will be added to the request querystring.
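+ *
+ * @example
+ * A sketch of a request literal for this endpoint (placeholder values):
+ * ```ts
+ * const req: IndicesGetDataStreamOptionsRequest = { name: 'logs-*', expand_wildcards: 'open' }
+ * ```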
*/ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never } +} + +export interface IndicesGetDataStreamOptionsResponse { + data_streams: IndicesGetDataStreamOptionsDataStreamWithOptions[] +} + +export interface IndicesGetDataStreamSettingsDataStreamSettings { + /** The name of the data stream. */ + name: string + /** The settings specific to this data stream */ + settings: IndicesIndexSettings + /** The settings specific to this data stream merged with the settings from its template. These `effective_settings` + * are the settings that will be used when a new index is created for this data stream. */ + effective_settings: IndicesIndexSettings +} + +export interface IndicesGetDataStreamSettingsRequest extends RequestBase { + /** A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`). */ + name: Indices + /** The period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export interface IndicesGetDataStreamSettingsResponse { + data_streams: IndicesGetDataStreamSettingsDataStreamSettings[] +} + +export interface IndicesGetFieldMappingRequest extends RequestBase { + /** Comma-separated list or wildcard expression of fields used to limit returned information. + * Supports wildcards (`*`). */ + fields: Fields + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, return all default settings in the response. */ + include_defaults?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never } + /** All values in `querystring` will be added to the request querystring. 
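+ *
+ * @example
+ * A minimal usage sketch (hypothetical field and index names; assumes a connected `Client` named `client`):
+ * ```ts
+ * const resp = await client.indices.getFieldMapping({ index: 'my-index', fields: 'user.id' })
+ * ```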
*/ + querystring?: { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never } +} + +export type IndicesGetFieldMappingResponse = Record<IndexName, IndicesGetFieldMappingTypeFieldMappings> + +export interface IndicesGetFieldMappingTypeFieldMappings { + mappings: Record<Field, MappingFieldMapping> +} + +export interface IndicesGetIndexTemplateIndexTemplateItem { + name: Name + index_template: IndicesIndexTemplate +} + +export interface IndicesGetIndexTemplateRequest extends RequestBase { + /** Name of index template to retrieve. Wildcard (*) expressions are supported. */ + name?: Name + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ + local?: boolean + /** If true, returns settings in flat format. */ + flat_settings?: boolean + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** If true, returns all relevant default configurations for the index template. */ + include_defaults?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never, include_defaults?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never, include_defaults?: never } +} + +export interface IndicesGetIndexTemplateResponse { + index_templates: IndicesGetIndexTemplateIndexTemplateItem[] +} + +export interface IndicesGetMappingIndexMappingRecord { + item?: MappingTypeMapping + mappings: MappingTypeMapping +} + +export interface IndicesGetMappingRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, the request retrieves information from the local node only. */ + local?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never } +} + +export type IndicesGetMappingResponse = Record<IndexName, IndicesGetMappingIndexMappingRecord>
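+
+// Example (editor's sketch, not generator output): a minimal use of the
+// get-mapping types above. The `Client` import from '@elastic/elasticsearch'
+// and the index name are assumptions for illustration only.
+//
+//   import { Client } from '@elastic/elasticsearch'
+//   async function showMappings (client: Client): Promise<void> {
+//     const request: IndicesGetMappingRequest = { index: 'my-index', expand_wildcards: 'open' }
+//     const response: IndicesGetMappingResponse = await client.indices.getMapping(request)
+//     // The response is keyed by concrete index name.
+//     console.log(response['my-index'].mappings.properties)
+//   }
+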
+export interface IndicesGetMigrateReindexStatusRequest extends RequestBase { + /** The index or data stream name. */ + index: Indices + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } +} + +export interface IndicesGetMigrateReindexStatusResponse { + start_time?: DateTime + start_time_millis: EpochTime<UnitMillis> + complete: boolean + total_indices_in_data_stream: integer + total_indices_requiring_upgrade: integer + successes: integer + in_progress: IndicesGetMigrateReindexStatusStatusInProgress[] + pending: integer + errors: IndicesGetMigrateReindexStatusStatusError[] + exception?: string +} + +export interface IndicesGetMigrateReindexStatusStatusError { + index: string + message: string +} + +export interface IndicesGetMigrateReindexStatusStatusInProgress { + index: string + total_doc_count: long + reindexed_doc_count: long +} + +export interface IndicesGetSampleRequest extends RequestBase { + /** Single index or data stream name. Wildcards are not supported. */ + index: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } +} + +export interface IndicesGetSampleResponse { + sample: IndicesGetSampleRawDocument[] +} + +export interface IndicesGetSampleRawDocument { + /** Name of the index for this raw document. */ + index: string + /** The original raw source. */ + source: Record<string, any> +} + +export interface IndicesGetSampleConfigurationRequest extends RequestBase { + /** The name of the index. */ + index: IndexName + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an
 * error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never } +} + +export interface IndicesGetSampleConfigurationResponse { + index: IndexName + configuration: IndicesSamplingConfiguration | null +} + +export interface IndicesGetSampleStatsRequest extends RequestBase { + /** Single index or data stream name. Wildcards are not supported. */ + index: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } +}
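+
+// Example (editor's sketch): reading sampling statistics with the request type
+// above. It assumes the client exposes a matching `indices.getSampleStats`
+// method; 'logs-web' is a placeholder index name.
+//
+//   async function sampleStats (client: Client): Promise<void> {
+//     const request: IndicesGetSampleStatsRequest = { index: 'logs-web' }
+//     const stats = await client.indices.getSampleStats(request)
+//     console.log(stats.samples_accepted, stats.potential_samples)
+//   }
+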
+export interface IndicesGetSampleStatsResponse { + potential_samples: long + samples_rejected_for_max_samples_exceeded: long + samples_rejected_for_condition: long + samples_rejected_for_rate: long + samples_rejected_for_exception: long + samples_rejected_for_size: long + samples_accepted: long + time_sampling?: Duration + time_sampling_millis: DurationValue<UnitMillis> + time_evaluating_condition?: Duration + time_evaluating_condition_millis: DurationValue<UnitMillis> + time_compiling_condition?: Duration + time_compiling_condition_millis: DurationValue<UnitMillis> + last_exception?: string +} + +export interface IndicesGetSettingsRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (`*`). To target all data streams and
 * indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** Comma-separated list or wildcard expression of settings to retrieve. */ + name?: Names + /** If `false`, the request returns an error if any wildcard expression, index + * alias, or `_all` value targets only missing or closed indices. This
 * behavior applies even if the request targets other open indices. For
 * example, a request targeting `foo*,bar*` returns an error if an index
 * starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, returns settings in flat format. */ + flat_settings?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, return all default settings in the response. */ + include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. If + * `false`, information is retrieved from the master node. */ + local?: boolean + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an
 * error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, name?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, name?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never } +} + +export type IndicesGetSettingsResponse = Record<IndexName, IndicesIndexState> + +export interface IndicesGetTemplateRequest extends RequestBase { + /** Comma-separated list of index template names used to limit the request. + * Wildcard (`*`) expressions are supported. + * To return all index templates, omit this parameter or use a value of `_all` or `*`. */ + name?: Names + /** If `true`, returns settings in flat format. */ + flat_settings?: boolean + /** If `true`, the request retrieves information from the local node only. */ + local?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } +} + +export type IndicesGetTemplateResponse = Record<string, IndicesTemplateMapping>
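+
+// Example (editor's sketch): listing legacy index templates with the types
+// above. The wildcard pattern is a placeholder; `Client` is assumed as before.
+//
+//   async function legacyTemplates (client: Client): Promise<void> {
+//     const request: IndicesGetTemplateRequest = { name: 'logs-*', flat_settings: true }
+//     // The response maps each template name to its definition.
+//     const response: IndicesGetTemplateResponse = await client.indices.getTemplate(request)
+//     console.log(Object.keys(response))
+//   }
+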
+export interface IndicesMigrateReindexMigrateReindex { + /** Reindex mode. Currently only 'upgrade' is supported. */ + mode: IndicesMigrateReindexModeEnum + /** The source index or data stream (only data streams are currently supported). */ + source: IndicesMigrateReindexSourceIndex +} + +export type IndicesMigrateReindexModeEnum = 'upgrade' + +export interface IndicesMigrateReindexRequest extends RequestBase { + reindex?: IndicesMigrateReindexMigrateReindex + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { reindex?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { reindex?: never } +} + +export type IndicesMigrateReindexResponse = AcknowledgedResponseBase + +export interface IndicesMigrateReindexSourceIndex { + index: IndexName +} + +export interface IndicesMigrateToDataStreamRequest extends RequestBase { + /** Name of the index alias to convert to a data stream. */ + name: IndexName + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } +} + +export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase + +export interface IndicesModifyDataStreamAction { + /** Adds an existing index as a backing index for a data stream. + * The index is hidden as part of this operation. + * WARNING: Adding indices with the `add_backing_index` action can potentially result in improper data stream behavior. + * This should be considered an expert level API. */ + add_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction + /** Removes a backing index from a data stream. + * The index is unhidden as part of this operation. + * A data stream’s write index cannot be removed. */ + remove_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction +} + +export interface IndicesModifyDataStreamIndexAndDataStreamAction { + /** Data stream targeted by the action. */ + data_stream: DataStreamName + /** Index for the action. */ + index: IndexName +} + +export interface IndicesModifyDataStreamRequest extends RequestBase { + /** Actions to perform. */ + actions: IndicesModifyDataStreamAction[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { actions?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { actions?: never } +} + +export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase
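+
+// Example (editor's sketch): atomically swapping a backing index using the
+// modify-data-stream types above. Stream and index names are placeholders.
+//
+//   async function swapBackingIndex (client: Client): Promise<void> {
+//     const request: IndicesModifyDataStreamRequest = {
+//       actions: [
+//         { add_backing_index: { data_stream: 'my-stream', index: 'restored-000001' } },
+//         { remove_backing_index: { data_stream: 'my-stream', index: '.ds-my-stream-000001' } }
+//       ]
+//     }
+//     await client.indices.modifyDataStream(request)
+//   }
+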
+export interface IndicesOpenRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * By default, you must explicitly name the indices you are using to limit the request. + * To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. + * You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. */ + index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + wait_for_active_shards?: WaitForActiveShards + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } +} + +export interface IndicesOpenResponse { + acknowledged: boolean + shards_acknowledged: boolean +} + +export interface IndicesPromoteDataStreamRequest extends RequestBase { + /** The name of the data stream */ + name: IndexName + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } +} + +export type IndicesPromoteDataStreamResponse = any + +export interface IndicesPutAliasRequest extends RequestBase { + /** Comma-separated list of data streams or indices to add. 
+ * Supports wildcards (`*`). + * Wildcard patterns that match both data streams and indices return an error. */ + index: Indices + /** Alias to update. + * If the alias doesn’t exist, the request creates it. + * Index alias names support date math. */ + name: Name + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** Query used to limit documents the alias can access. */ + filter?: QueryDslQueryContainer + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. + * Data stream aliases don’t support this parameter. */ + index_routing?: Routing + /** If `true`, sets the write index or data stream for the alias. + * If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. + * If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. + * Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. */ + is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. + * Data stream aliases don’t support this parameter. */ + routing?: Routing + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. + * Data stream aliases don’t support this parameter. */ + search_routing?: Routing + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never, filter?: never, index_routing?: never, is_write_index?: never, routing?: never, search_routing?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never, filter?: never, index_routing?: never, is_write_index?: never, routing?: never, search_routing?: never } +} + +export type IndicesPutAliasResponse = AcknowledgedResponseBase + +export interface IndicesPutDataLifecycleRequest extends RequestBase { + /** Comma-separated list of data streams used to limit the request. + * Supports wildcards (`*`). + * To target all data streams use `*` or `_all`. */ + name: DataStreamNames + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** If defined, every document added to this data stream will be stored at least for this time frame. + * Any time after this duration the document could be deleted. + * When empty, every document in this data stream will be stored indefinitely. 
*/ + data_retention?: Duration + /** The downsampling configuration to execute for the managed backing index after rollover. */ + downsampling?: IndicesDownsamplingRound[] + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle + * that's disabled (enabled: `false`) will have no effect on the data stream. */ + enabled?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, data_retention?: never, downsampling?: never, enabled?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, data_retention?: never, downsampling?: never, enabled?: never } +} + +export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase + +export interface IndicesPutDataStreamMappingsRequest extends RequestBase { + /** A comma-separated list of data streams or data stream patterns. */ + name: Indices + /** If `true`, the request does not actually change the mappings on any data streams. Instead, it + * simulates changing the mappings and reports back to the user what would have happened had these mappings
 * actually been applied. */ + dry_run?: boolean + /** The period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an
 * error. */ + master_timeout?: Duration + /** The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ + timeout?: Duration + mappings?: MappingTypeMapping + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, mappings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, mappings?: never } +} + +export interface IndicesPutDataStreamMappingsResponse { + data_streams: IndicesPutDataStreamMappingsUpdatedDataStreamMappings[] +} + +export interface IndicesPutDataStreamMappingsUpdatedDataStreamMappings { + /** The data stream name. */ + name: IndexName + /** If the mappings were successfully applied to the data stream (or would have been, if running in `dry_run` + * mode), it is `true`. If an error occurred, it is `false`. */ + applied_to_data_stream: boolean + /** A message explaining why the mappings could not be applied to the data stream. */ + error?: string + /** The mappings that are specific to this data stream that will override any mappings from the matching index template. */ + mappings?: MappingTypeMapping + /** The mappings that are effective on this data stream, taking into account the mappings from the matching index + * template and the mappings specific to this data stream. */ + effective_mappings?: MappingTypeMapping +} + +export interface IndicesPutDataStreamOptionsRequest extends RequestBase { + /** Comma-separated list of data streams used to limit the request. + * Supports wildcards (`*`). + * To target all data streams use `*` or `_all`. */ + name: DataStreamNames + /** Type of data stream that wildcard patterns can match. 
+ * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** If defined, it will update the failure store configuration of every data stream resolved by the name expression. */ + failure_store?: IndicesDataStreamFailureStore + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, failure_store?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, failure_store?: never } +} + +export type IndicesPutDataStreamOptionsResponse = AcknowledgedResponseBase + +export interface IndicesPutDataStreamSettingsDataStreamSettingsError { + index: IndexName + /** A message explaining why the settings could not be applied to specific indices. */ + error: string +} + +export interface IndicesPutDataStreamSettingsIndexSettingResults { + /** The list of settings that were applied to the data stream but not to backing indices. These will be applied to + * the write index the next time the data stream is rolled over. */ + applied_to_data_stream_only: string[] + /** The list of settings that were applied to the data stream and to all of its backing indices. These settings will + * also be applied to the write index the next time the data stream is rolled over. */ + applied_to_data_stream_and_backing_indices: string[] + errors?: IndicesPutDataStreamSettingsDataStreamSettingsError[] +} + +export interface IndicesPutDataStreamSettingsRequest extends RequestBase { + /** A comma-separated list of data streams or data stream patterns. */ + name: Indices + /** If `true`, the request does not actually change the settings on any data streams or indices. Instead, it + * simulates changing the settings and reports back to the user what would have happened had these settings + * actually been applied. */ + dry_run?: boolean + /** The period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** The period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ + timeout?: Duration + settings?: IndicesIndexSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, dry_run?: never, master_timeout?: never, timeout?: never, settings?: never } +} + +export interface IndicesPutDataStreamSettingsResponse { + data_streams: IndicesPutDataStreamSettingsUpdatedDataStreamSettings[] +} + +export interface IndicesPutDataStreamSettingsUpdatedDataStreamSettings { + /** The data stream name. 
*/ + name: IndexName + /** If the settings were successfully applied to the data stream (or would have been, if running in `dry_run` + * mode), it is `true`. If an error occurred, it is `false`. */ + applied_to_data_stream: boolean + /** A message explaining why the settings could not be applied to the data stream. */ + error?: string + /** The settings that are specific to this data stream that will override any settings from the matching index template. */ + settings: IndicesIndexSettings + /** The settings that are effective on this data stream, taking into account the settings from the matching index + * template and the settings specific to this data stream. */ + effective_settings: IndicesIndexSettings + /** Information about whether and where each setting was applied. */ + index_settings_results: IndicesPutDataStreamSettingsIndexSettingResults +} + +export interface IndicesPutIndexTemplateIndexTemplateMapping { + /** Aliases to add. + * If the index template includes a `data_stream` object, these are data stream aliases. + * Otherwise, these are index aliases. + * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ + aliases?: Record<IndexName, IndicesAlias> + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ + mappings?: MappingTypeMapping + /** Configuration options for the index. */ + settings?: IndicesIndexSettings + lifecycle?: IndicesDataStreamLifecycle +} + +export interface IndicesPutIndexTemplateRequest extends RequestBase { + /** Index or template name */ + name: Name + /** If `true`, this request cannot replace or update existing index templates. */ + create?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** User defined reason for creating/updating the index template */ + cause?: string + /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */ + index_patterns?: Indices + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ + composed_of?: Name[] + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ + template?: IndicesPutIndexTemplateIndexTemplateMapping + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ + data_stream?: IndicesDataStreamVisibility + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ + priority?: long + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. + * External systems can use these version numbers to simplify template management. + * To unset a version, replace the template without specifying one. */ + version?: VersionNumber + /** Optional user metadata about the index template. + * It may have any contents. + * It is not automatically generated or used by Elasticsearch. + * This user-defined object is stored in the cluster state, so keeping it short is preferable. + * To unset the metadata, replace the template without specifying it. */ + _meta?: Metadata + /** This setting overrides the value of the `action.auto_create_index` cluster setting. + * If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. + * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ + allow_auto_create?: boolean + /** The configuration option ignore_missing_component_templates can be used when an index template + * references a component template that might not exist. */ + ignore_missing_component_templates?: string[] + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ + deprecated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, allow_auto_create?: never, ignore_missing_component_templates?: never, deprecated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, allow_auto_create?: never, ignore_missing_component_templates?: never, deprecated?: never } +} + +export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase
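+
+// Example (editor's sketch): creating a composable index template for a data
+// stream with the request type above. All names and values are placeholders.
+//
+//   async function createTemplate (client: Client): Promise<void> {
+//     const request: IndicesPutIndexTemplateRequest = {
+//       name: 'logs-template',
+//       index_patterns: ['logs-*'],
+//       data_stream: {},
+//       priority: 200,
+//       template: { settings: { number_of_shards: 1 } }
+//     }
+//     await client.indices.putIndexTemplate(request)
+//   }
+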
+export interface IndicesPutMappingRequest extends RequestBase { + /** A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. */ + index: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** If `true`, the mappings are applied only to the current write index for the target. */ + write_index_only?: boolean + /** Controls whether dynamic date detection is enabled. */ + date_detection?: boolean + /** Controls whether new fields are added dynamically. 
*/ + dynamic?: MappingDynamicMapping + /** If date detection is enabled then new string fields are checked + * against 'dynamic_date_formats' and if the value matches then
 * a new date field is added instead of string. */ + dynamic_date_formats?: string[] + /** Specify dynamic templates for the mapping. */ + dynamic_templates?: Partial<Record<string, MappingDynamicTemplate>>[] + /** Control whether field names are enabled for the index. */ + _field_names?: MappingFieldNamesField + /** A mapping type can have custom metadata associated with it. These are + * not used at all by Elasticsearch, but can be used to store
 * application-specific metadata. */ + _meta?: Metadata + /** Automatically map strings into numeric data types for all fields. */ + numeric_detection?: boolean + /** Mapping for a field. For new fields, this mapping can include: + * + * - Field name + * - Field data type + * - Mapping parameters */ + properties?: Record<PropertyName, MappingProperty> + /** Enable making a routing value required on indexed documents. */ + _routing?: MappingRoutingField + /** Control whether the _source field is enabled on the index. */ + _source?: MappingSourceField + /** Mapping of runtime fields for the index. */ + runtime?: MappingRuntimeFields + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, write_index_only?: never, date_detection?: never, dynamic?: never, dynamic_date_formats?: never, dynamic_templates?: never, _field_names?: never, _meta?: never, numeric_detection?: never, properties?: never, _routing?: never, _source?: never, runtime?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, write_index_only?: never, date_detection?: never, dynamic?: never, dynamic_date_formats?: never, dynamic_templates?: never, _field_names?: never, _meta?: never, numeric_detection?: never, properties?: never, _routing?: never, _source?: never, runtime?: never } +} + +export type IndicesPutMappingResponse = IndicesResponseBase + +export interface IndicesPutSampleConfigurationRequest extends RequestBase { + /** The name of the index or data stream. */ + index: IndexName + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an
 * error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The fraction of documents to sample. Must be greater than 0 and less than or equal to 1. + * Can be specified as a number or a string. */ + rate: SpecUtilsStringified<double> + /** The maximum number of documents to sample. Must be greater than 0 and less than or equal to 10,000. */ + max_samples?: integer + /** The maximum total size of sampled documents. Must be greater than 0 and less than or equal to 5GB. */ + max_size?: ByteSize + /** The duration for which the sampled documents should be retained. + * Must be greater than 0 and less than or equal to 30 days. */ + time_to_live?: Duration + /** An optional condition script that sampled documents must satisfy. */ + if?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, rate?: never, max_samples?: never, max_size?: never, time_to_live?: never, if?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, rate?: never, max_samples?: never, max_size?: never, time_to_live?: never, if?: never } +} + +export type IndicesPutSampleConfigurationResponse = AcknowledgedResponseBase
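+
+// Example (editor's sketch): enabling document sampling with the request type
+// above. Assumes a matching `indices.putSampleConfiguration` client method;
+// the values are placeholders chosen to satisfy the documented bounds.
+//
+//   async function configureSampling (client: Client): Promise<void> {
+//     const request: IndicesPutSampleConfigurationRequest = {
+//       index: 'logs-web',
+//       rate: 0.1,          // sample roughly 10% of incoming documents
+//       max_samples: 1000,
+//       time_to_live: '7d'
+//     }
+//     await client.indices.putSampleConfiguration(request)
+//   }
+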
+export interface IndicesPutSettingsRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (`*`). To target all data streams and
 * indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index + * alias, or `_all` value targets only missing or closed indices. This
 * behavior applies even if the request targets other open indices. For
 * example, a request targeting `foo*,bar*` returns an error if an index
 * starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match
 * hidden data streams. Supports comma-separated values, such as
 * `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, returns settings in flat format. */ + flat_settings?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an
 * error. */ + master_timeout?: Duration + /** If `true`, existing index settings remain unchanged. */ + preserve_existing?: boolean + /** Whether to close and reopen the index to apply non-dynamic settings. + * If set to `true` the indices to which the settings are being applied
 * will be closed temporarily and then reopened in order to apply the changes. */ + reopen?: boolean + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ + timeout?: Duration + settings?: IndicesIndexSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, reopen?: never, timeout?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, reopen?: never, timeout?: never, settings?: never } +} + +export type IndicesPutSettingsResponse = AcknowledgedResponseBase + +export interface IndicesPutTemplateRequest extends RequestBase { + /** The name of the template */ + name: Name + /** If true, this request cannot replace or update existing index templates. */ + create?: boolean + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. 
*/ + master_timeout?: Duration + /** User defined reason for creating/updating the index template */ + cause?: string + /** Aliases for the index. */ + aliases?: Record<IndexName, IndicesAlias> + /** Array of wildcard expressions used to match the names + * of indices during creation. */ + index_patterns?: string | string[] + /** Mapping for fields in the index. */ + mappings?: MappingTypeMapping + /** Order in which Elasticsearch applies this template if the index + * matches multiple templates. + * + * Templates with lower 'order' values are merged first. Templates with higher
 * 'order' values are merged later, overriding templates with lower values. */ + order?: integer + /** Configuration options for the index. */ + settings?: IndicesIndexSettings + /** Version number used to manage index templates externally. This number + * is not automatically generated by Elasticsearch. + * To unset a version, replace the template without specifying one. */ + version?: VersionNumber + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, aliases?: never, index_patterns?: never, mappings?: never, order?: never, settings?: never, version?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, aliases?: never, index_patterns?: never, mappings?: never, order?: never, settings?: never, version?: never } +} + +export type IndicesPutTemplateResponse = AcknowledgedResponseBase + +export interface IndicesRecoveryFileDetails { + length: long + name: string + recovered: long +} + +export interface IndicesRecoveryRecoveryBytes { + percent: Percentage + recovered?: ByteSize + recovered_in_bytes: ByteSize + recovered_from_snapshot?: ByteSize + recovered_from_snapshot_in_bytes?: ByteSize + reused?: ByteSize + reused_in_bytes: ByteSize + total?: ByteSize + total_in_bytes: ByteSize +} + +export interface IndicesRecoveryRecoveryFiles { + details?: IndicesRecoveryFileDetails[] + percent: Percentage + recovered: long + reused: long + total: long +} + +export interface IndicesRecoveryRecoveryIndexStatus { + bytes?: IndicesRecoveryRecoveryBytes + files: IndicesRecoveryRecoveryFiles + size: IndicesRecoveryRecoveryBytes + source_throttle_time?: Duration + source_throttle_time_in_millis: DurationValue<UnitMillis> + target_throttle_time?: Duration + target_throttle_time_in_millis: DurationValue<UnitMillis> + total_time?: Duration + total_time_in_millis: DurationValue<UnitMillis> +} + +export interface IndicesRecoveryRecoveryOrigin { + hostname?: string + host?: Host + transport_address?: TransportAddress + id?: Id + ip?: Ip + name?: Name + bootstrap_new_history_uuid?: boolean + repository?: Name + snapshot?: Name + version?: VersionString + restoreUUID?: Uuid + index?: IndexName +} + +export interface IndicesRecoveryRecoveryStartStatus { + check_index_time?: Duration + check_index_time_in_millis: DurationValue<UnitMillis> + total_time?: Duration + total_time_in_millis: DurationValue<UnitMillis> +} + +export interface IndicesRecoveryRecoveryStatus { + shards: IndicesRecoveryShardRecovery[] +} + +export interface IndicesRecoveryRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `true`, the response only includes ongoing shard recoveries. */ + active_only?: boolean + /** If `true`, the response includes detailed information about shard recoveries. */ + detailed?: boolean + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, active_only?: never, detailed?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } +} + +export type IndicesRecoveryResponse = Record<IndexName, IndicesRecoveryRecoveryStatus>
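+
+// Example (editor's sketch): inspecting ongoing shard recoveries with the types
+// above. The response is keyed by index name; 'my-index' is a placeholder.
+//
+//   async function showRecoveries (client: Client): Promise<void> {
+//     const request: IndicesRecoveryRequest = { index: 'my-index', active_only: true }
+//     const response: IndicesRecoveryResponse = await client.indices.recovery(request)
+//     for (const shard of response['my-index'].shards) {
+//       console.log(shard.id, shard.stage, shard.type)
+//     }
+//   }
+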
+export interface IndicesRecoveryShardRecovery { + id: long + index: IndicesRecoveryRecoveryIndexStatus + primary: boolean + source: IndicesRecoveryRecoveryOrigin + stage: string + start?: IndicesRecoveryRecoveryStartStatus + start_time?: DateTime + start_time_in_millis: EpochTime<UnitMillis> + stop_time?: DateTime + stop_time_in_millis?: EpochTime<UnitMillis> + target: IndicesRecoveryRecoveryOrigin + total_time?: Duration + total_time_in_millis: DurationValue<UnitMillis> + translog: IndicesRecoveryTranslogStatus + type: string + verify_index: IndicesRecoveryVerifyIndex +} + +export interface IndicesRecoveryTranslogStatus { + percent: Percentage + recovered: long + total: long + total_on_start: long + total_time?: Duration + total_time_in_millis: DurationValue<UnitMillis> +} + +export interface IndicesRecoveryVerifyIndex { + check_index_time?: Duration + check_index_time_in_millis: DurationValue<UnitMillis> + total_time?: Duration + total_time_in_millis: DurationValue<UnitMillis> +} + +export interface IndicesRefreshRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } +} + +export type IndicesRefreshResponse = ShardsOperationResponseBase + +export interface IndicesReloadSearchAnalyzersReloadDetails { + index: string + reloaded_analyzers: string[] + reloaded_node_ids: string[] +} + +export interface IndicesReloadSearchAnalyzersReloadResult { + reload_details: IndicesReloadSearchAnalyzersReloadDetails[] + _shards: ShardStatistics +} + +export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { + /** A comma-separated list of index names to reload analyzers for */ + index: Indices + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + allow_no_indices?: boolean + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ + expand_wildcards?: ExpandWildcards + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ + ignore_unavailable?: boolean + /** Changed resource to reload analyzers from if applicable */ + resource?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, resource?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, resource?: never } +} + +export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult + +export interface IndicesRemoveBlockRemoveIndicesBlockStatus { + name: IndexName + unblocked?: boolean + exception?: ErrorCause +} + +export interface IndicesRemoveBlockRequest extends RequestBase { + /** A comma-separated list or wildcard expression of index names used to limit the request. + * By default, you must explicitly name the indices you are removing blocks from. + * To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. + * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ + index: Indices + /** The block type to remove from the index. */ + block: IndicesIndicesBlockOptions + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. 
*/ + ignore_unavailable?: boolean + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ + master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * It can also be set to `-1` to indicate that the request should never timeout. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } +} + +export interface IndicesRemoveBlockResponse { + acknowledged: boolean + indices: IndicesRemoveBlockRemoveIndicesBlockStatus[] +} + +export interface IndicesResolveClusterRequest extends RequestBase { + /** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. + * Resources on remote clusters can be specified using the `<cluster>:<name>` syntax. + * Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. + * If no index expression is specified, information about all remote clusters configured on the local cluster
 * is returned without doing any index matching. */ + name?: Names + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing + * or closed indices. This behavior applies even if the request targets other open indices. For example, a request
 * targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
 * options to the `_resolve/cluster` API endpoint that takes no index expression. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
 * options to the `_resolve/cluster` API endpoint that takes no index expression. */ + expand_wildcards?: ExpandWildcards + /** If true, concrete, expanded, or aliased indices are ignored when frozen. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
 * options to the `_resolve/cluster` API endpoint that takes no index expression. */ + ignore_throttled?: boolean + /** If false, the request returns an error if it targets a missing or closed index. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
 * options to the `_resolve/cluster` API endpoint that takes no index expression. */ + ignore_unavailable?: boolean + /** The maximum time to wait for remote clusters to respond. + * If a remote cluster does not respond within this timeout period, the API response
 * will show the cluster as not connected and include an error message that the
 * request timed out. + * + * The default timeout is unset and the query can take
 * as long as the networking layer is configured to wait for remote clusters that are
 * not responding (typically 30 seconds). */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, timeout?: never } +} + +export interface IndicesResolveClusterResolveClusterInfo { + /** Whether the remote cluster is connected to the local (querying) cluster. */ + connected: boolean + /** The `skip_unavailable` setting for a remote cluster. */ + skip_unavailable: boolean + /** Whether the index expression provided in the request matches any indices, aliases or data streams
 * on the cluster. */ + matching_indices?: boolean + /** Provides error messages that are likely to occur if you do a search with this index expression
 * on the specified cluster (for example, lack of security privileges to query an index). */ + error?: string + /** Provides version information about the cluster. */ + version?: ElasticsearchVersionMinInfo +} + +export type IndicesResolveClusterResponse = Record<ClusterAlias, IndicesResolveClusterResolveClusterInfo>
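+
+// Example (editor's sketch): checking whether a remote cluster can serve an
+// index expression before searching it. 'cluster_one' is a placeholder alias.
+//
+//   async function checkRemote (client: Client): Promise<void> {
+//     const request: IndicesResolveClusterRequest = { name: 'cluster_one:logs-*' }
+//     const response: IndicesResolveClusterResponse = await client.indices.resolveCluster(request)
+//     const info = response['cluster_one']
+//     console.log(info.connected, info.matching_indices)
+//   }
+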
+export interface IndicesResolveIndexRequest extends RequestBase { + /** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. + * Resources on remote clusters can be specified using the `<cluster>:<name>` syntax. */ + name: Names + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + allow_no_indices?: boolean + /** Filter indices by index mode - standard, lookup, time_series, etc. Comma-separated list of IndexMode. Empty means no filter. */ + mode?: IndicesIndexMode | IndicesIndexMode[] + /** Specifies a subset of projects to target using project
 * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never, project_routing?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never, project_routing?: never } +}
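+
+// Example (editor's sketch): expanding a wildcard expression into the concrete
+// indices, aliases, and data streams it matches, using the request type above.
+//
+//   async function resolve (client: Client): Promise<void> {
+//     const request: IndicesResolveIndexRequest = { name: 'logs-*', expand_wildcards: 'open' }
+//     const response = await client.indices.resolveIndex(request)
+//     console.log(response.indices.map(i => i.name))
+//   }
+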
*/ + body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never, project_routing?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never, mode?: never, project_routing?: never } +} + +export interface IndicesResolveIndexResolveIndexAliasItem { + name: Name + indices: Indices +} + +export interface IndicesResolveIndexResolveIndexDataStreamsItem { + name: DataStreamName + timestamp_field: Field + backing_indices: Indices +} + +export interface IndicesResolveIndexResolveIndexItem { + name: Name + aliases?: string[] + attributes: string[] + data_stream?: DataStreamName + mode?: IndicesIndexMode +} + +export interface IndicesResolveIndexResponse { + indices: IndicesResolveIndexResolveIndexItem[] + aliases: IndicesResolveIndexResolveIndexAliasItem[] + data_streams: IndicesResolveIndexResolveIndexDataStreamsItem[] +} + +export interface IndicesRolloverRequest extends RequestBase { + /** Name of the data stream or index alias to roll over. */ + alias: IndexAlias + /** Name of the index to create. + * Supports date math. + * Data streams do not support this parameter. */ + new_index?: IndexName + /** If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. */ + dry_run?: boolean + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + wait_for_active_shards?: WaitForActiveShards + /** If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. + * Only allowed on data streams. */ + lazy?: boolean + /** Aliases for the target index. + * Data streams do not support this parameter. */ + aliases?: Record<IndexName, IndicesAlias> + /** Conditions for the rollover. + * If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. + * If this parameter is not specified, Elasticsearch performs the rollover unconditionally. + * If conditions are specified, at least one of them must be a `max_*` condition. + * The index will roll over if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. */ + conditions?: IndicesRolloverRolloverConditions + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ + mappings?: MappingTypeMapping + /** Configuration options for the index. + * Data streams do not support this parameter. */ + settings?: Record<string, any> + /** All values in `body` will be added to the request body.
*/ + body?: string | { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, lazy?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, lazy?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } +} + +export interface IndicesRolloverResponse { + acknowledged: boolean + conditions: Record<string, boolean> + dry_run: boolean + new_index: string + old_index: string + rolled_over: boolean + shards_acknowledged: boolean +} + +export interface IndicesRolloverRolloverConditions { + min_age?: Duration + max_age?: Duration + max_age_millis?: DurationValue<UnitMillis> + min_docs?: long + max_docs?: long + /** The `max_size` condition has been deprecated in 9.3.0 and `max_primary_shard_size` should be used instead. */ + max_size?: ByteSize + max_size_bytes?: long + min_size?: ByteSize + min_size_bytes?: long + max_primary_shard_size?: ByteSize + max_primary_shard_size_bytes?: long + min_primary_shard_size?: ByteSize + min_primary_shard_size_bytes?: long + max_primary_shard_docs?: long + min_primary_shard_docs?: long +} + +export interface IndicesSegmentsIndexSegment { + shards: Record<string, IndicesSegmentsShardsSegment | IndicesSegmentsShardsSegment[]> +} + +export interface IndicesSegmentsRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } + /** All values in `querystring` will be added to the request querystring.
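// Sketch of a conditional rollover against the IndicesRolloverRequest type
// above (hypothetical alias name). The rollover only happens if at least one
// `max_*` condition is met and every `min_*` condition holds; `dry_run` checks
// the conditions without performing the rollover.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const resp = await client.indices.rollover({
  alias: 'logs-alias',
  conditions: { max_age: '7d', max_docs: 100000, min_primary_shard_docs: 1 },
  dry_run: true
})
// `conditions` in the response maps each condition to a boolean
console.log(resp.rolled_over, resp.conditions)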
*/ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } +} + +export interface IndicesSegmentsResponse { + indices: Record<string, IndicesSegmentsIndexSegment> + _shards: ShardStatistics +} + +export interface IndicesSegmentsSegment { + attributes: Record<string, string> + committed: boolean + compound: boolean + deleted_docs: long + generation: integer + search: boolean + size_in_bytes: double + num_docs: long + version: VersionString +} + +export interface IndicesSegmentsShardSegmentRouting { + node: string + primary: boolean + state: string +} + +export interface IndicesSegmentsShardsSegment { + num_committed_segments: integer + routing: IndicesSegmentsShardSegmentRouting + num_search_segments: integer + segments: Record<string, IndicesSegmentsSegment> +} + +export interface IndicesShardStoresIndicesShardStores { + shards: Record<string, IndicesShardStoresShardStoreWrapper> +} + +export interface IndicesShardStoresRequest extends RequestBase { + /** List of data streams, indices, and aliases used to limit the request. */ + index?: Indices + /** If false, the request returns an error if any wildcard expression, index alias, or _all + * value targets only missing or closed indices. This behavior applies even if the request + * targets other open indices. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, + * this argument determines whether wildcard expressions match hidden data streams. */ + expand_wildcards?: ExpandWildcards + /** If true, missing or closed indices are not included in the response. */ + ignore_unavailable?: boolean + /** List of shard health statuses used to limit the request. */ + status?: IndicesShardStoresShardStoreStatus | IndicesShardStoresShardStoreStatus[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, status?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, status?: never } +} + +export interface IndicesShardStoresResponse { + indices: Record<IndexName, IndicesShardStoresIndicesShardStores> +} + +export interface IndicesShardStoresShardStoreKeys { + allocation: IndicesShardStoresShardStoreAllocation + allocation_id?: Id + store_exception?: IndicesShardStoresShardStoreException +} +export type IndicesShardStoresShardStore = IndicesShardStoresShardStoreKeys +& { [property: string]: IndicesShardStoresShardStoreNode | IndicesShardStoresShardStoreAllocation | Id | IndicesShardStoresShardStoreException } + +export type IndicesShardStoresShardStoreAllocation = 'primary' | 'replica' | 'unused' + +export interface IndicesShardStoresShardStoreException { + reason: string + type: string +} + +export interface IndicesShardStoresShardStoreNode { + attributes: Record<string, string> + ephemeral_id?: string + external_id?: string + name: Name + roles: string[] + transport_address: TransportAddress +} + +export type IndicesShardStoresShardStoreStatus = 'green' | 'yellow' | 'red' | 'all' + +export interface IndicesShardStoresShardStoreWrapper { + stores: IndicesShardStoresShardStore[] +} + +export interface IndicesShrinkRequest extends RequestBase { + /** Name of the source index to shrink. */ + index: IndexName + /** Name of the target index to create. */ + target: IndexName + /** Period to wait for a connection to the master node.
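// Sketch (hypothetical index name): reading the segments and shard-store
// responses typed above. `status: 'red'` limits the shard-store report to
// red shards.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const segments = await client.indices.segments({ index: 'my-index' })
console.log(segments._shards)

const stores = await client.indices.shardStores({ index: 'my-index', status: 'red' })
console.log(stores.indices)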
+ * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + wait_for_active_shards?: WaitForActiveShards + /** The key is the alias name. + * Index alias names support date math. */ + aliases?: Record<IndexName, IndicesAlias> + /** Configuration options for the target index. */ + settings?: Record<string, any> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } +} + +export interface IndicesShrinkResponse { + acknowledged: boolean + shards_acknowledged: boolean + index: IndexName +} + +export interface IndicesSimulateIndexTemplateRequest extends RequestBase { + /** Name of the index to simulate. */ + name: Name + /** Whether the index template optionally defined in the body should only be dry-run added if it is new, or may also replace an existing one. */ + create?: boolean + /** User-defined reason for dry-run creating the new template for simulation purposes. */ + cause?: string + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** If true, returns all relevant default configurations for the index template. */ + include_defaults?: boolean + index_template?: IndicesIndexTemplate + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, index_template?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, index_template?: never } +} + +export interface IndicesSimulateIndexTemplateResponse { + overlapping?: IndicesSimulateTemplateOverlapping[] + template: IndicesSimulateTemplateTemplate +} + +export interface IndicesSimulateTemplateOverlapping { + name: Name + index_patterns: string[] +} + +export interface IndicesSimulateTemplateRequest extends RequestBase { + /** Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit + * this parameter and specify the template configuration in the request body. */ + name?: Name + /** If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation.
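// Sketch of the shrink flow per IndicesShrinkRequest above (hypothetical
// index and node names). The source index must be made read-only and fully
// allocated onto a single node before shrinking.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.indices.putSettings({
  index: 'my-source-index',
  settings: {
    'index.blocks.write': true,
    'index.routing.allocation.require._name': 'shrink-node-1'
  }
})
const shrunk = await client.indices.shrink({
  index: 'my-source-index',
  target: 'my-shrunk-index',
  settings: {
    'index.number_of_shards': 1,
    // clear the allocation requirement copied from the source index
    'index.routing.allocation.require._name': null
  }
})
console.log(shrunk.acknowledged, shrunk.shards_acknowledged, shrunk.index)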
*/ + create?: boolean + /** User-defined reason for dry-run creating the new template for simulation purposes. */ + cause?: string + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** If true, returns all relevant default configurations for the index template. */ + include_defaults?: boolean + /** This setting overrides the value of the `action.auto_create_index` cluster setting. + * If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`. + * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ + allow_auto_create?: boolean + /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */ + index_patterns?: Indices + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ + composed_of?: Name[] + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ + template?: IndicesPutIndexTemplateIndexTemplateMapping + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ + data_stream?: IndicesDataStreamVisibility + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ + priority?: long + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. */ + version?: VersionNumber + /** Optional user metadata about the index template. + * May have any contents. + * This map is not automatically generated by Elasticsearch. */ + _meta?: Metadata + /** The configuration option `ignore_missing_component_templates` can be used when an index template + * references a component template that might not exist. */ + ignore_missing_component_templates?: string[] + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ + deprecated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } + /** All values in `querystring` will be added to the request querystring.
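// Sketch: dry-running an inline template with the simulate-template types
// above (hypothetical pattern). Omitting `name` simulates the body instead of
// an existing template; nothing is persisted either way.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const sim = await client.indices.simulateTemplate({
  index_patterns: ['logs-sim-*'],
  priority: 10,
  template: { settings: { number_of_shards: 2 } }
})
console.log(sim.template.settings, sim.overlapping)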
*/ + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } +} + +export interface IndicesSimulateTemplateResponse { + overlapping?: IndicesSimulateTemplateOverlapping[] + template: IndicesSimulateTemplateTemplate +} + +export interface IndicesSimulateTemplateTemplate { + aliases: Record<IndexName, IndicesAlias> + mappings: MappingTypeMapping + settings: IndicesIndexSettings +} + +export interface IndicesSplitRequest extends RequestBase { + /** Name of the source index to split. */ + index: IndexName + /** Name of the target index to create. */ + target: IndexName + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + wait_for_active_shards?: WaitForActiveShards + /** Aliases for the resulting index. */ + aliases?: Record<IndexName, IndicesAlias> + /** Configuration options for the target index. */ + settings?: Record<string, any> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, target?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, settings?: never } +} + +export interface IndicesSplitResponse { + acknowledged: boolean + shards_acknowledged: boolean + index: IndexName +} + +export type IndicesStatsIndexMetadataState = 'open' | 'close' + +export interface IndicesStatsIndexStats { + /** Contains statistics about completions across all shards assigned to the node. */ + completion?: CompletionStats + /** Contains statistics about documents across all primary shards assigned to the node. */ + docs?: DocStats + /** Contains statistics about the field data cache across all shards assigned to the node. */ + fielddata?: FielddataStats + /** Contains statistics about flush operations for the node. */ + flush?: FlushStats + /** Contains statistics about get operations for the node. */ + get?: GetStats + /** Contains statistics about indexing operations for the node. */ + indexing?: IndexingStats + /** Contains statistics about indices operations for the node. */ + indices?: IndicesStatsIndicesStats + /** Contains statistics about merge operations for the node. */ + merges?: MergesStats + /** Contains statistics about the query cache across all shards assigned to the node. */ + query_cache?: QueryCacheStats + /** Contains statistics about recovery operations for the node. */ + recovery?: RecoveryStats + /** Contains statistics about refresh operations for the node. */ + refresh?: RefreshStats + /** Contains statistics about the request cache across all shards assigned to the node.
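// Sketch of a split per IndicesSplitRequest above (hypothetical names). The
// target's number_of_shards must be a multiple of the source's shard count,
// and the source must be write-blocked first.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.indices.putSettings({ index: 'my-index', settings: { 'index.blocks.write': true } })
const split = await client.indices.split({
  index: 'my-index',
  target: 'my-index-split',
  settings: { 'index.number_of_shards': 4 }
})
console.log(split.index, split.acknowledged)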
*/ + request_cache?: RequestCacheStats + /** Contains statistics about search operations for the node. */ + search?: SearchStats + /** Contains statistics about segments across all shards assigned to the node. */ + segments?: SegmentsStats + /** Contains statistics about the size of shards assigned to the node. */ + store?: StoreStats + /** Contains statistics about transaction log operations for the node. */ + translog?: TranslogStats + /** Contains statistics about index warming operations for the node. */ + warmer?: WarmerStats + bulk?: BulkStats + shard_stats?: IndicesStatsShardsTotalStats +} + +export interface IndicesStatsIndicesStats { + primaries?: IndicesStatsIndexStats + shards?: Record<string, IndicesStatsShardStats[]> + total?: IndicesStatsIndexStats + uuid?: Uuid + health?: HealthStatus + status?: IndicesStatsIndexMetadataState +} + +export interface IndicesStatsMappingStats { + total_count: long + total_estimated_overhead?: ByteSize + total_estimated_overhead_in_bytes: long +} + +export interface IndicesStatsRequest extends RequestBase { + /** Limit the information returned to the specific metrics. */ + metric?: CommonStatsFlags + /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices. */ + index?: Indices + /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */ + completion_fields?: Fields + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */ + fielddata_fields?: Fields + /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ + fields?: Fields + /** If true, statistics are not collected from closed indices. */ + forbid_closed_indices?: boolean + /** Comma-separated list of search groups to include in the search statistics. */ + groups?: string | string[] + /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */ + include_segment_file_sizes?: boolean + /** If true, the response includes information from segments that are not loaded into memory. */ + include_unloaded_segments?: boolean + /** Indicates whether statistics are aggregated at the cluster, indices, or shards level. */ + level?: Level + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { metric?: never, index?: never, completion_fields?: never, expand_wildcards?: never, fielddata_fields?: never, fields?: never, forbid_closed_indices?: never, groups?: never, include_segment_file_sizes?: never, include_unloaded_segments?: never, level?: never } + /** All values in `querystring` will be added to the request querystring.
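// Sketch: shard-level index statistics via IndicesStatsRequest above
// (hypothetical index name).
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const stats = await client.indices.stats({ index: 'my-index', level: 'shards' })
console.log(stats._all.total?.docs, stats.indices?.['my-index']?.health)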
*/ + querystring?: { [key: string]: any } & { metric?: never, index?: never, completion_fields?: never, expand_wildcards?: never, fielddata_fields?: never, fields?: never, forbid_closed_indices?: never, groups?: never, include_segment_file_sizes?: never, include_unloaded_segments?: never, level?: never } +} + +export interface IndicesStatsResponse { + indices?: Record<IndexName, IndicesStatsIndicesStats> + _shards: ShardStatistics + _all: IndicesStatsIndicesStats +} + +export interface IndicesStatsShardCommit { + generation: integer + id: Id + num_docs: long + user_data: Record<string, string> +} + +export interface IndicesStatsShardFileSizeInfo { + description: string + size_in_bytes: long + min_size_in_bytes?: long + max_size_in_bytes?: long + average_size_in_bytes?: long + count?: long +} + +export interface IndicesStatsShardLease { + id: Id + retaining_seq_no: SequenceNumber + timestamp: long + source: string +} + +export interface IndicesStatsShardPath { + data_path: string + is_custom_data_path: boolean + state_path: string +} + +export interface IndicesStatsShardQueryCache { + cache_count: long + cache_size: long + evictions: long + hit_count: long + memory_size_in_bytes: long + miss_count: long + total_count: long +} + +export interface IndicesStatsShardRetentionLeases { + primary_term: long + version: VersionNumber + leases: IndicesStatsShardLease[] +} + +export interface IndicesStatsShardRouting { + node: string + primary: boolean + relocating_node?: string | null + state: IndicesStatsShardRoutingState +} + +export type IndicesStatsShardRoutingState = 'UNASSIGNED' | 'INITIALIZING' | 'STARTED' | 'RELOCATING' + +export interface IndicesStatsShardSequenceNumber { + global_checkpoint: long + local_checkpoint: long + max_seq_no: SequenceNumber +} + +export interface IndicesStatsShardStats { + commit?: IndicesStatsShardCommit + completion?: CompletionStats + docs?: DocStats + fielddata?: FielddataStats + flush?: FlushStats + get?: GetStats + indexing?: IndexingStats + mappings?: IndicesStatsMappingStats + merges?: MergesStats + shard_path?: IndicesStatsShardPath + query_cache?: IndicesStatsShardQueryCache + recovery?: RecoveryStats + refresh?: RefreshStats + request_cache?: RequestCacheStats + retention_leases?: IndicesStatsShardRetentionLeases + routing?: IndicesStatsShardRouting + search?: SearchStats + segments?: SegmentsStats + seq_no?: IndicesStatsShardSequenceNumber + store?: StoreStats + translog?: TranslogStats + warmer?: WarmerStats + bulk?: BulkStats + shards?: Record<string, any> + shard_stats?: IndicesStatsShardsTotalStats + indices?: IndicesStatsIndicesStats +} + +export interface IndicesStatsShardsTotalStats { + total_count: long +} + +export interface IndicesUpdateAliasesAction { + /** Adds a data stream or index to an alias. + * If the alias doesn’t exist, the `add` action creates it. */ + add?: IndicesUpdateAliasesAddAction + /** Removes a data stream or index from an alias. */ + remove?: IndicesUpdateAliasesRemoveAction + /** Deletes an index. + * You cannot use this action on aliases or data streams. */ + remove_index?: IndicesUpdateAliasesRemoveIndexAction +} + +export interface IndicesUpdateAliasesAddAction { + /** Alias for the action. + * Index alias names support date math. */ + alias?: IndexAlias + /** Aliases for the action. + * Index alias names support date math. */ + aliases?: IndexAlias | IndexAlias[] + /** Query used to limit documents the alias can access. */ + filter?: QueryDslQueryContainer + /** Data stream or index for the action. + * Supports wildcards (`*`).
*/ + index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ + indices?: Indices + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. + * Data stream aliases don’t support this parameter. */ + index_routing?: Routing + /** If `true`, the alias is hidden. */ + is_hidden?: boolean + /** If `true`, sets the write index or data stream for the alias. */ + is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. + * Data stream aliases don’t support this parameter. */ + routing?: Routing + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. + * Data stream aliases don’t support this parameter. */ + search_routing?: Routing + /** If `true`, the alias must exist to perform the action. */ + must_exist?: boolean +} + +export interface IndicesUpdateAliasesRemoveAction { + /** Alias for the action. + * Index alias names support date math. */ + alias?: IndexAlias + /** Aliases for the action. + * Index alias names support date math. */ + aliases?: IndexAlias | IndexAlias[] + /** Data stream or index for the action. + * Supports wildcards (`*`). */ + index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ + indices?: Indices + /** If `true`, the alias must exist to perform the action. */ + must_exist?: boolean +} + +export interface IndicesUpdateAliasesRemoveIndexAction { + /** Data stream or index for the action. + * Supports wildcards (`*`). */ + index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ + indices?: Indices + /** If `true`, the alias must exist to perform the action. */ + must_exist?: boolean +} + +export interface IndicesUpdateAliasesRequest extends RequestBase { + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** Actions to perform. */ + actions?: IndicesUpdateAliasesAction[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never, actions?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never, actions?: never } +} + +export type IndicesUpdateAliasesResponse = AcknowledgedResponseBase + +export interface IndicesValidateQueryIndicesValidationExplanation { + error?: string + explanation?: string + index: IndexName + valid: boolean +} + +export interface IndicesValidateQueryRequest extends RequestBase { + /** Comma-separated list of data streams, indices, and aliases to search. + * Supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + index?: Indices + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. 
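// Sketch: an atomic alias swap built from the update-aliases action types
// above (hypothetical index and alias names). All actions in the list are
// applied together or not at all.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const resp = await client.indices.updateAliases({
  actions: [
    { remove: { index: 'my-index-000001', alias: 'my-write-alias', must_exist: true } },
    { add: { index: 'my-index-000002', alias: 'my-write-alias', is_write_index: true } }
  ]
})
console.log(resp.acknowledged)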
*/ + allow_no_indices?: boolean + /** If `true`, the validation is executed on all shards instead of one random shard per index. */ + all_shards?: boolean + /** Analyzer to use for the query string. + * This parameter can only be used when the `q` query string parameter is specified. */ + analyzer?: string + /** If `true`, wildcard and prefix queries are analyzed. */ + analyze_wildcard?: boolean + /** The default operator for query string query: `and` or `or`. */ + default_operator?: QueryDslOperator + /** Field to use as default where no field prefix is given in the query string. + * This parameter can only be used when the `q` query string parameter is specified. */ + df?: string + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** If `true`, the response returns detailed information if an error has occurred. */ + explain?: boolean + /** If `false`, the request returns an error if it targets a missing or closed index. */ + ignore_unavailable?: boolean + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. */ + lenient?: boolean + /** If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. */ + rewrite?: boolean + /** Query in the Lucene query string syntax. */ + q?: string + /** Query in the Elasticsearch Query DSL. */ + query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, all_shards?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, explain?: never, ignore_unavailable?: never, lenient?: never, rewrite?: never, q?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, all_shards?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, explain?: never, ignore_unavailable?: never, lenient?: never, rewrite?: never, q?: never, query?: never } +} + +export interface IndicesValidateQueryResponse { + explanations?: IndicesValidateQueryIndicesValidationExplanation[] + _shards?: ShardStatistics + valid: boolean + error?: string +} + +export interface InferenceAdaptiveAllocations { + /** Turn on `adaptive_allocations`. */ + enabled?: boolean + /** The maximum number of allocations to scale to. + * If set, it must be greater than or equal to `min_number_of_allocations`. */ + max_number_of_allocations?: integer + /** The minimum number of allocations to scale to. + * If set, it must be greater than or equal to 0. + * If not defined, the deployment scales to 0. */ + min_number_of_allocations?: integer +} + +export interface InferenceAi21ServiceSettings { + /** The name of the model to use for the inference task. + * Refer to the AI21 models documentation for the list of supported models and versions. + * The service has been tested and confirmed to work for `completion` and `chat_completion` tasks with the following models: + * * `jamba-mini` + * * `jamba-large` */ + model_id: string + /** A valid API key for accessing the AI21 API.
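// Sketch: validating a Query DSL body with `explain` per the validate-query
// types above (hypothetical index and field).
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

const v = await client.indices.validateQuery({
  index: 'my-index',
  explain: true,
  query: { range: { '@timestamp': { gte: 'now-1d' } } }
})
if (!v.valid) console.log(v.explanations?.[0]?.error)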
+ * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key?: string + /** This setting helps to minimize the number of rate limit errors returned from the AI21 API. + * By default, the `ai21` service sets the number of requests allowed per minute to 200. Please refer to AI21 documentation for more details. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceAi21ServiceType = 'ai21' + +export type InferenceAi21TaskType = 'completion' | 'chat_completion' + +export interface InferenceAlibabaCloudServiceSettings { + /** A valid API key for the AlibabaCloud AI Search API. */ + api_key: string + /** The name of the host address used for the inference task. + * You can find the host address in the API keys section of the documentation. */ + host: string + /** This setting helps to minimize the number of rate limit errors returned from AlibabaCloud AI Search. + * By default, the `alibabacloud-ai-search` service sets the number of requests allowed per minute to `1000`. */ + rate_limit?: InferenceRateLimitSetting + /** The name of the model service to use for the inference task. + * The following service IDs are available for the `completion` task: + * + * * `ops-qwen-turbo` + * * `qwen-turbo` + * * `qwen-plus` + * * `qwen-max ÷ qwen-max-longcontext` + * + * The following service ID is available for the `rerank` task: + * + * * `ops-bge-reranker-larger` + * + * The following service ID is available for the `sparse_embedding` task: + * + * * `ops-text-sparse-embedding-001` + * + * The following service IDs are available for the `text_embedding` task: + * + * `ops-text-embedding-001` + * `ops-text-embedding-zh-001` + * `ops-text-embedding-en-001` + * `ops-text-embedding-002` */ + service_id: string + /** The name of the workspace used for the inference task. */ + workspace: string +} + +export type InferenceAlibabaCloudServiceType = 'alibabacloud-ai-search' + +export interface InferenceAlibabaCloudTaskSettings { + /** For a `sparse_embedding` or `text_embedding` task, specify the type of input passed to the model. + * Valid values are: + * + * * `ingest` for storing document embeddings in a vector database. + * * `search` for storing embeddings of search queries run against a vector database to find relevant documents. */ + input_type?: string + /** For a `sparse_embedding` task, it affects whether the token name will be returned in the response. + * It defaults to `false`, which means only the token ID will be returned in the response. */ + return_token?: boolean +} + +export type InferenceAlibabaCloudTaskType = 'completion' | 'rerank' | 'sparse_embedding' | 'text_embedding' + +export interface InferenceAmazonBedrockServiceSettings { + /** A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests. */ + access_key: string + /** The base model ID or an ARN to a custom model based on a foundational model. + * The base model IDs can be found in the Amazon Bedrock documentation. + * Note that the model ID must be available for the provider chosen and your IAM user must have access to the model. */ + model: string + /** The model provider for your deployment. 
+ * Note that some providers may support only certain task types. + * Supported providers include: + * + * * `amazontitan` - available for `text_embedding` and `completion` task types + * * `anthropic` - available for `completion` task type only + * * `ai21labs` - available for `completion` task type only + * * `cohere` - available for `text_embedding` and `completion` task types + * * `meta` - available for `completion` task type only + * * `mistral` - available for `completion` task type only */ + provider?: string + /** The region that your model or ARN is deployed in. + * The list of available regions per model can be found in the Amazon Bedrock documentation. */ + region: string + /** This setting helps to minimize the number of rate limit errors returned from Amazon Bedrock. + * By default, the `amazonbedrock` service sets the number of requests allowed per minute to 240. */ + rate_limit?: InferenceRateLimitSetting + /** A valid AWS secret key that is paired with the `access_key`. + * For information about creating and managing access and secret keys, refer to the AWS documentation. */ + secret_key: string +} + +export type InferenceAmazonBedrockServiceType = 'amazonbedrock' + +export interface InferenceAmazonBedrockTaskSettings { + /** For a `completion` task, it sets the maximum number of output tokens to be generated. */ + max_new_tokens?: integer + /** For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results. + * At temperature 0.0 the model is most deterministic, at temperature 1.0 most random. + * It should not be used if `top_p` or `top_k` is specified. */ + temperature?: float + /** For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability. + * It is only available for anthropic, cohere, and mistral providers. + * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */ + top_k?: float + /** For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens. + * Top-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence. + * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */ + top_p?: float +} + +export type InferenceAmazonBedrockTaskType = 'completion' | 'text_embedding' + +export type InferenceAmazonSageMakerApi = 'openai' | 'elastic' + +export interface InferenceAmazonSageMakerServiceSettings { + /** A valid AWS access key that has permissions to use Amazon SageMaker and access to models for invoking requests. */ + access_key: string + /** The name of the SageMaker endpoint. */ + endpoint_name: string + /** The API format to use when calling SageMaker. + * Elasticsearch will convert the POST _inference request to this data format when invoking the SageMaker endpoint. */ + api: InferenceAmazonSageMakerApi + /** The region that your endpoint or Amazon Resource Name (ARN) is deployed in. + * The list of available regions per model can be found in the Amazon SageMaker documentation. */ + region: string + /** A valid AWS secret key that is paired with the `access_key`. + * For information about creating and managing access and secret keys, refer to the AWS documentation. */ + secret_key: string + /** The model ID when calling a multi-model endpoint. */ + target_model?: string + /** The container to directly invoke when calling a multi-container endpoint.
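// Sketch: registering an Amazon Bedrock completion endpoint with the service
// settings above. The `inference_config` wrapper follows this client's generic
// inference.put request shape (an assumption); IDs, keys, and the model name
// are placeholders.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.inference.put({
  task_type: 'completion',
  inference_id: 'my-bedrock-completion',
  inference_config: {
    service: 'amazonbedrock',
    service_settings: {
      access_key: '<aws-access-key>',
      secret_key: '<aws-secret-key>',
      region: 'us-east-1',
      provider: 'amazontitan',
      model: 'amazon.titan-text-express-v1'
    },
    task_settings: { max_new_tokens: 256, temperature: 0.2 }
  }
})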
*/ + target_container_hostname?: string + /** The inference component to directly invoke when calling a multi-component endpoint. */ + inference_component_name?: string + /** The maximum number of inputs in each batch. This value is used by inference ingestion pipelines + * when processing semantic values. It correlates to the number of times the SageMaker endpoint is + * invoked (one per batch of input). */ + batch_size?: integer + /** The number of dimensions returned by the text embedding models. If this value is not provided, then + * it is guessed by invoking the endpoint for the `text_embedding` task. */ + dimensions?: integer +} + +export type InferenceAmazonSageMakerServiceType = 'amazon_sagemaker' + +export interface InferenceAmazonSageMakerTaskSettings { + /** The AWS custom attributes passed verbatim through to the model running in the SageMaker Endpoint. + * Values will be returned in the `X-elastic-sagemaker-custom-attributes` header. */ + custom_attributes?: string + /** The optional JMESPath expression used to override the EnableExplanations provided during endpoint creation. */ + enable_explanations?: string + /** The capture data ID when enabled in the endpoint. */ + inference_id?: string + /** The stateful session identifier for a new or existing session. + * New sessions will be returned in the `X-elastic-sagemaker-new-session-id` header. + * Closed sessions will be returned in the `X-elastic-sagemaker-closed-session-id` header. */ + session_id?: string + /** Specifies the variant when running with multi-variant Endpoints. */ + target_variant?: string +} + +export interface InferenceAnthropicServiceSettings { + /** A valid API key for the Anthropic API. */ + api_key: string + /** The name of the model to use for the inference task. + * Refer to the Anthropic documentation for the list of supported models. */ + model_id: string + /** This setting helps to minimize the number of rate limit errors returned from Anthropic. + * By default, the `anthropic` service sets the number of requests allowed per minute to 50. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceAnthropicServiceType = 'anthropic' + +export interface InferenceAnthropicTaskSettings { + /** For a `completion` task, it is the maximum number of tokens to generate before stopping. */ + max_tokens: integer + /** For a `completion` task, it is the amount of randomness injected into the response. + * For more details about the supported range, refer to Anthropic documentation. */ + temperature?: float + /** For a `completion` task, it specifies to only sample from the top K options for each subsequent token. + * It is recommended for advanced use cases only. + * You usually only need to use `temperature`. */ + top_k?: integer + /** For a `completion` task, it specifies to use Anthropic's nucleus sampling. + * In nucleus sampling, Anthropic computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches the specified probability. + * You should either alter `temperature` or `top_p`, but not both. + * It is recommended for advanced use cases only. + * You usually only need to use `temperature`. */ + top_p?: float +} + +export type InferenceAnthropicTaskType = 'completion' + +export interface InferenceAzureAiStudioServiceSettings { + /** A valid API key of your Azure AI Studio model deployment.
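// Sketch: an Anthropic completion endpoint per the settings above. Note that
// `max_tokens` is the one required task setting; the key and model ID are
// placeholders.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.inference.put({
  task_type: 'completion',
  inference_id: 'my-anthropic-completion',
  inference_config: {
    service: 'anthropic',
    service_settings: { api_key: '<anthropic-api-key>', model_id: 'claude-3-5-haiku-latest' },
    task_settings: { max_tokens: 1024 }
  }
})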
+ * This key can be found on the overview page for your deployment in the management section of your Azure AI Studio account. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The type of endpoint that is available for deployment through Azure AI Studio: `token` or `realtime`. + * The `token` endpoint type is for "pay as you go" endpoints that are billed per token. + * The `realtime` endpoint type is for "real-time" endpoints that are billed per hour of usage. */ + endpoint_type: string + /** The target URL of your Azure AI Studio model deployment. + * This can be found on the overview page for your deployment in the management section of your Azure AI Studio account. */ + target: string + /** The model provider for your deployment. + * Note that some providers may support only certain task types. + * Supported providers include: + * + * * `cohere` - available for `text_embedding` and `completion` task types + * * `databricks` - available for `completion` task type only + * * `meta` - available for `completion` task type only + * * `microsoft_phi` - available for `completion` task type only + * * `mistral` - available for `completion` task type only + * * `openai` - available for `text_embedding` and `completion` task types */ + provider: string + /** This setting helps to minimize the number of rate limit errors returned from Azure AI Studio. + * By default, the `azureaistudio` service sets the number of requests allowed per minute to 240. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceAzureAiStudioServiceType = 'azureaistudio' + +export interface InferenceAzureAiStudioTaskSettings { + /** For a `completion` task, instruct the inference process to perform sampling. + * It has no effect unless `temperature` or `top_p` is specified. */ + do_sample?: float + /** For a `completion` task, provide a hint for the maximum number of output tokens to be generated. */ + max_new_tokens?: integer + /** For a `completion` task, control the apparent creativity of generated completions with a sampling temperature. + * It must be a number in the range of 0.0 to 2.0. + * It should not be used if `top_p` is specified. */ + temperature?: float + /** For a `completion` task, make the model consider the results of the tokens with nucleus sampling probability. + * It is an alternative value to `temperature` and must be a number in the range of 0.0 to 2.0. + * It should not be used if `temperature` is specified. */ + top_p?: float + /** For a `text_embedding` task, specify the user issuing the request. + * This information can be used for abuse detection. */ + user?: string + /** For a `rerank` task, return doc text within the results. */ + return_documents?: boolean + /** For a `rerank` task, the number of most relevant documents to return. + * It defaults to the number of documents. */ + top_n?: integer +} + +export type InferenceAzureAiStudioTaskType = 'completion' | 'rerank' | 'text_embedding' + +export interface InferenceAzureOpenAIServiceSettings { + /** A valid API key for your Azure OpenAI account. + * You must specify either `api_key` or `entra_id`.
+ * If you do not provide either or you provide both, you will receive an error when you try to create your model. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key?: string + /** The Azure API version ID to use. + * It is recommended to use the latest supported non-preview version. */ + api_version: string + /** The deployment name of your deployed models. + * Your Azure OpenAI deployments can be found through the Azure OpenAI Studio portal that is linked to your subscription. */ + deployment_id: string + /** A valid Microsoft Entra token. + * You must specify either `api_key` or `entra_id`. + * If you do not provide either or you provide both, you will receive an error when you try to create your model. */ + entra_id?: string + /** This setting helps to minimize the number of rate limit errors returned from Azure. + * The `azureopenai` service sets a default number of requests allowed per minute depending on the task type. + * For `text_embedding`, it is set to `1440`. + * For `completion`, it is set to `120`. */ + rate_limit?: InferenceRateLimitSetting + /** The name of your Azure OpenAI resource. + * You can find this from the list of resources in the Azure Portal for your subscription. */ + resource_name: string +} + +export type InferenceAzureOpenAIServiceType = 'azureopenai' + +export interface InferenceAzureOpenAITaskSettings { + /** For a `completion` or `text_embedding` task, specify the user issuing the request. + * This information can be used for abuse detection. */ + user?: string +} + +export type InferenceAzureOpenAITaskType = 'completion' | 'text_embedding' + +export type InferenceCohereEmbeddingType = 'binary' | 'bit' | 'byte' | 'float' | 'int8' + +export type InferenceCohereInputType = 'classification' | 'clustering' | 'ingest' | 'search' + +export interface InferenceCohereServiceSettings { + /** A valid API key for your Cohere account. + * You can find or create your Cohere API keys on the Cohere API key settings page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** For a `text_embedding` task, the types of embeddings you want to get back. + * Use `binary` for binary embeddings, which are encoded as bytes with signed int8 precision. + * Use `bit` for binary embeddings, which are encoded as bytes with signed int8 precision (this is a synonym of `binary`). + * Use `byte` for signed int8 embeddings (this is a synonym of `int8`). + * Use `float` for the default float embeddings. + * Use `int8` for signed int8 embeddings. */ + embedding_type?: InferenceCohereEmbeddingType + /** For a `completion`, `rerank`, or `text_embedding` task, the name of the model to use for the inference task. + * + * * For the available `completion` models, refer to the [Cohere command docs](https://docs.cohere.com/docs/models#command).
+ * * For the available `rerank` models, refer to the [Cohere rerank docs](https://docs.cohere.com/reference/rerank-1). + * * For the available `text_embedding` models, refer to [Cohere embed docs](https://docs.cohere.com/reference/embed). */ + model_id: string + /** This setting helps to minimize the number of rate limit errors returned from Cohere. + * By default, the `cohere` service sets the number of requests allowed per minute to 10000. */ + rate_limit?: InferenceRateLimitSetting + /** The similarity measure. + * If the `embedding_type` is `float`, the default value is `dot_product`. + * If the `embedding_type` is `int8` or `byte`, the default value is `cosine`. */ + similarity?: InferenceCohereSimilarityType +} + +export type InferenceCohereServiceType = 'cohere' + +export type InferenceCohereSimilarityType = 'cosine' | 'dot_product' | 'l2_norm' + +export interface InferenceCohereTaskSettings { + /** For a `text_embedding` task, the type of input passed to the model. + * Valid values are: + * + * * `classification`: Use it for embeddings passed through a text classifier. + * * `clustering`: Use it for the embeddings run through a clustering algorithm. + * * `ingest`: Use it for storing document embeddings in a vector database. + * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. + * + * IMPORTANT: The `input_type` field is required when using embedding models `v3` and higher. */ + input_type: InferenceCohereInputType + /** For a `rerank` task, return doc text within the results. */ + return_documents?: boolean + /** For a `rerank` task, the number of most relevant documents to return. + * It defaults to the number of documents. + * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. */ + top_n?: integer + /** For a `text_embedding` task, the method to handle inputs longer than the maximum token length. + * Valid values are: + * + * * `END`: When the input exceeds the maximum input token length, the end of the input is discarded. + * * `NONE`: When the input exceeds the maximum input token length, an error is returned. + * * `START`: When the input exceeds the maximum input token length, the start of the input is discarded. */ + truncate?: InferenceCohereTruncateType +} + +export type InferenceCohereTaskType = 'completion' | 'rerank' | 'text_embedding' + +export type InferenceCohereTruncateType = 'END' | 'NONE' | 'START' + +export interface InferenceCompletionInferenceResult { + completion: InferenceCompletionResult[] +} + +export interface InferenceCompletionResult { + result: string +} + +export interface InferenceCompletionTool { + /** The type of tool. */ + type: string + /** The function definition. */ + function: InferenceCompletionToolFunction +} + +export interface InferenceCompletionToolChoice { + /** The type of the tool. */ + type: string + /** The tool choice function. */ + function: InferenceCompletionToolChoiceFunction +} + +export interface InferenceCompletionToolChoiceFunction { + /** The name of the function to call. */ + name: string +} + +export interface InferenceCompletionToolFunction { + /** A description of what the function does. + * This is used by the model to choose when and how to call the function. */ + description?: string + /** The name of the function. */ + name: string + /** The parameters the function accepts. This should be formatted as a JSON object.
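// Sketch: a Cohere text_embedding endpoint per the settings above. The
// `input_type` task setting is required for v3+ embedding models; the key and
// model ID are placeholders.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200' })

await client.inference.put({
  task_type: 'text_embedding',
  inference_id: 'my-cohere-embeddings',
  inference_config: {
    service: 'cohere',
    service_settings: {
      api_key: '<cohere-api-key>',
      model_id: 'embed-english-v3.0',
      embedding_type: 'byte'
    },
    task_settings: { input_type: 'ingest' }
  }
})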
*/ + parameters?: any + /** Whether to enable schema adherence when generating the function call. */ + strict?: boolean +} + +export type InferenceCompletionToolType = string | InferenceCompletionToolChoice + +export interface InferenceContentObject { + /** The text content. */ + text: string + /** The type of content. */ + type: string +} + +export interface InferenceContextualAIServiceSettings { + /** A valid API key for your Contextual AI account. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The name of the model to use for the inference task. + * Refer to the Contextual AI documentation for the list of available rerank models. */ + model_id: string + /** This setting helps to minimize the number of rate limit errors returned from Contextual AI. + * The `contextualai` service sets a default number of requests allowed per minute depending on the task type. + * For `rerank`, it is set to `1000`. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceContextualAIServiceType = 'contextualai' + +export interface InferenceContextualAITaskSettings { + /** Instructions for the reranking model. + * Only for the `rerank` task type. */ + instruction?: string + /** Whether to return the source documents in the response. + * Only for the `rerank` task type. */ + return_documents?: boolean + /** The number of most relevant documents to return. + * If not specified, the reranking results of all documents will be returned. + * Only for the `rerank` task type. */ + top_k?: integer +} + +export interface InferenceCustomRequestParams { + /** The body structure of the request. It requires passing in the string-escaped result of the JSON format HTTP request body. + * For example: + * ``` + * "request": "{\"input\":${input}}" + * ``` + * > info + * > The content string needs to be a single line except when using the Kibana console. */ + content: string +} + +export interface InferenceCustomResponseParams { + /** Specifies the JSON parser that is used to parse the response from the custom service. + * Different task types require different json_parser parameters. + * For example: + * ``` + * # text_embedding + * # For a response like this: + * + * { + * "object": "list", + * "data": [ + * { + * "object": "embedding", + * "index": 0, + * "embedding": [ + * 0.014539449, + * -0.015288644 + * ] + * } + * ], + * "model": "text-embedding-ada-002-v2", + * "usage": { + * "prompt_tokens": 8, + * "total_tokens": 8 + * } + * } + * + * # the json_parser definition should look like this: + * + * "response":{ + * "json_parser":{ + * "text_embeddings":"$.data[*].embedding[*]" + * } + * } + * + * # Elasticsearch supports the following embedding types: + * * float + * * byte + * * bit (or binary) + * + * To specify the embedding type for the response, the `embedding_type` + * field should be added in the `json_parser` object. Here's an example: + * "response":{ + * "json_parser":{ + * "text_embeddings":"$.data[*].embedding[*]", + * "embedding_type":"bit" + * } + * } + * + * If `embedding_type` is not specified, it defaults to `float`.
+ * + * # sparse_embedding + * # For a response like this: + * + * { + * "request_id": "75C50B5B-E79E-4930-****-F48DBB392231", + * "latency": 22, + * "usage": { + * "token_count": 11 + * }, + * "result": { + * "sparse_embeddings": [ + * { + * "index": 0, + * "embedding": [ + * { + * "token_id": 6, + * "weight": 0.101 + * }, + * { + * "token_id": 163040, + * "weight": 0.28417 + * } + * ] + * } + * ] + * } + * } + * + * # the json_parser definition should look like this: + * + * "response":{ + * "json_parser":{ + * "token_path":"$.result.sparse_embeddings[*].embedding[*].token_id", + * "weight_path":"$.result.sparse_embeddings[*].embedding[*].weight" + * } + * } + * + * # rerank + * # For a response like this: + * + * { + * "results": [ + * { + * "index": 3, + * "relevance_score": 0.999071, + * "document": "abc" + * }, + * { + * "index": 4, + * "relevance_score": 0.7867867, + * "document": "123" + * }, + * { + * "index": 0, + * "relevance_score": 0.32713068, + * "document": "super" + * } + * ] + * } + * + * # the json_parser definition should look like this: + * + * "response":{ + * "json_parser":{ + * "reranked_index":"$.results[*].index", // optional + * "relevance_score":"$.results[*].relevance_score", + * "document_text":"$.results[*].document" // optional + * } + * } + * + * # completion + * # For a response like this: + * + * { + * "id": "chatcmpl-B9MBs8CjcvOU2jLn4n570S5qMJKcT", + * "object": "chat.completion", + * "created": 1741569952, + * "model": "gpt-4.1-2025-04-14", + * "choices": [ + * { + * "index": 0, + * "message": { + * "role": "assistant", + * "content": "Hello! How can I assist you today?", + * "refusal": null, + * "annotations": [] + * }, + * "logprobs": null, + * "finish_reason": "stop" + * } + * ] + * } + * + * # the json_parser definition should look like this: + * + * "response":{ + * "json_parser":{ + * "completion_result":"$.choices[*].message.content" + * } + * } */ + json_parser: any +} + +export interface InferenceCustomServiceSettings { + /** Specifies the batch size used for the semantic_text field. If the field is not provided, the default is 10. + * The batch size is the maximum number of inputs in a single request to the upstream service. + * The chunks within the batch are controlled by the selected chunking strategy for the semantic_text field. */ + batch_size?: integer + /** Specifies the HTTP header parameters – such as `Authorization` or `Content-Type` – that are required to access the custom service. + * For example: + * ``` + * "headers":{ + * "Authorization": "Bearer ${api_key}", + * "Content-Type": "application/json;charset=utf-8" + * } + * ``` */ + headers?: any + /** Specifies the input type translation values that are used to replace the `${input_type}` template in the request body. + * For example: + * ``` + * "input_type": { + * "translation": { + * "ingest": "do_ingest", + * "search": "do_search" + * }, + * "default": "a_default" + * }, + * ``` + * If the subsequent inference requests come from a search context, the `search` key will be used and the template will be replaced with `do_search`. + * If it comes from the ingest context, `do_ingest` is used. If it's a different context that is not specified, the default value will be used. If no default is specified, an empty string is used. + * `translation` can be: + * * `classification` + * * `clustering` + * * `ingest` + * * `search` */ + input_type?: any + /** Specifies the query parameters as a list of tuples. The arrays inside the `query_parameters` must have two items, a key and a value.
+ * For example: + * ``` + * "query_parameters":[ + * ["param_key", "some_value"], + * ["param_key", "another_value"], + * ["other_key", "other_value"] + * ] + * ``` + * If the base URL is `https://www.elastic.co`, it results in: `https://www.elastic.co?param_key=some_value&param_key=another_value&other_key=other_value`. */ + query_parameters?: any + /** The request configuration object. */ + request: InferenceCustomRequestParams + /** The response configuration object. */ + response: InferenceCustomResponseParams + /** Specifies secret parameters, like `api_key` or `api_token`, that are required to access the custom service. + * For example: + * ``` + * "secret_parameters":{ + * "api_key":"" + * } + * ``` */ + secret_parameters: any + /** The URL endpoint to use for the requests. */ + url?: string +} + +export type InferenceCustomServiceType = 'custom' + +export interface InferenceCustomTaskSettings { + /** Specifies parameters that are required to run the custom service. The parameters depend on the model your custom service uses. + * For example: + * ``` + * "task_settings":{ + * "parameters":{ + * "input_type":"query", + * "return_token":true + * } + * } + * ``` */ + parameters?: any +} + +export type InferenceCustomTaskType = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion' + +export interface InferenceDeepSeekServiceSettings { + /** A valid API key for your DeepSeek account. + * You can find or create your DeepSeek API keys on the DeepSeek API key page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** For a `completion` or `chat_completion` task, the name of the model to use for the inference task. + * + * For the available `completion` and `chat_completion` models, refer to the [DeepSeek Models & Pricing docs](https://api-docs.deepseek.com/quick_start/pricing). */ + model_id: string + /** The URL endpoint to use for the requests. Defaults to `https://api.deepseek.com/chat/completions`. */ + url?: string +} + +export type InferenceDeepSeekServiceType = 'deepseek' + +export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase { + pipelines: string[] +} + +export type InferenceDenseByteVector = byte[] + +export type InferenceDenseVector = float[] + +export interface InferenceElasticsearchServiceSettings { + /** Adaptive allocations configuration details. + * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets. + * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set. + * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set. + * If `enabled` is true, do not set the number of allocations manually. */ + adaptive_allocations?: InferenceAdaptiveAllocations + /** The deployment identifier for a trained model deployment. + * When `deployment_id` is used, the `model_id` is optional. */ + deployment_id?: string + /** The name of the model to use for the inference task.
+ * It can be the ID of a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model that was uploaded by using the Eland client. */ + model_id: string + /** The total number of allocations that are assigned to the model across machine learning nodes. + * Increasing this value generally increases the throughput. + * If adaptive allocations are enabled, do not set this value because it's automatically set. */ + num_allocations?: integer + /** The number of threads used by each model allocation during inference. + * Increasing this value generally increases the speed per inference request. + * The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. + * The value must be a power of 2. + * The maximum value is 32. */ + num_threads: integer + /** Available only for the `rerank` task type using the Elastic reranker model. + * Controls the strategy used for processing long documents during inference. + * + * Possible values: + * - `truncate` (default): Processes only the beginning of each document. + * - `chunk`: Splits long documents into smaller parts (chunks) before inference. + * + * When `long_document_strategy` is set to `chunk`, Elasticsearch splits each document into smaller parts but still returns a single score per document. + * That score reflects the highest relevance score among all chunks. */ + long_document_strategy?: string + /** Only for the `rerank` task type. + * Limits the number of chunks per document that are sent for inference when chunking is enabled. + * If not set, all chunks generated for the document are processed. */ + max_chunks_per_doc?: integer +} + +export type InferenceElasticsearchServiceType = 'elasticsearch' + +export interface InferenceElasticsearchTaskSettings { + /** For a `rerank` task, return the document instead of only the index. */ + return_documents?: boolean +} + +export type InferenceElasticsearchTaskType = 'rerank' | 'sparse_embedding' | 'text_embedding' + +export interface InferenceElserServiceSettings { + /** Adaptive allocations configuration details. + * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets. + * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set. + * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set. + * If `enabled` is true, do not set the number of allocations manually. */ + adaptive_allocations?: InferenceAdaptiveAllocations + /** The total number of allocations this model is assigned across machine learning nodes. + * Increasing this value generally increases the throughput. + * If adaptive allocations are enabled, do not set this value because it's automatically set. */ + num_allocations: integer + /** The number of threads used by each model allocation during inference. + * Increasing this value generally increases the speed per inference request. + * The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. + * The value must be a power of 2. + * The maximum value is 32. + * + * > info + * > If you want to optimize your ELSER endpoint for ingest, set the number of threads to 1. If you want to optimize your ELSER endpoint for search, set the number of threads to greater than 1.
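+ * + * For example, an illustrative ingest-optimized ELSER configuration (the values shown are assumptions, not service defaults): + * ``` + * "service_settings": { + * "adaptive_allocations": { "enabled": true, "min_number_of_allocations": 1 }, + * "num_threads": 1 + * } + * ```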
*/ + num_threads: integer +} + +export type InferenceElserServiceType = 'elser' + +export type InferenceElserTaskType = 'sparse_embedding' + +export type InferenceGoogleAiServiceType = 'googleaistudio' + +export interface InferenceGoogleAiStudioServiceSettings { + /** A valid API key of your Google Gemini account. */ + api_key: string + /** The name of the model to use for the inference task. + * Refer to the Google documentation for the list of supported models. */ + model_id: string + /** This setting helps to minimize the number of rate limit errors returned from Google AI Studio. + * By default, the `googleaistudio` service sets the number of requests allowed per minute to 360. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceGoogleAiStudioTaskType = 'completion' | 'text_embedding' + +export type InferenceGoogleModelGardenProvider = 'google' | 'anthropic' + +export interface InferenceGoogleVertexAIServiceSettings { + /** The name of the Google Model Garden Provider for `completion` and `chat_completion` tasks. + * For a Google Model Garden endpoint to be used, `provider` must be defined and set to a value other than `google`. + * Modes: + * - Google Model Garden (third-party models): set `provider` to a supported non-`google` value and provide `url` and/or `streaming_url`. + * - Google Vertex AI: omit `provider` or set it to `google`. In this mode, do not set `url` or `streaming_url` and Elastic will construct the endpoint URL from the `location`, `model_id`, and `project_id` parameters. */ + provider?: InferenceGoogleModelGardenProvider + /** The URL for non-streaming `completion` requests to a Google Model Garden provider endpoint. + * If both `url` and `streaming_url` are provided, each is used for its respective mode. + * If `streaming_url` is not provided, `url` is also used for streaming `completion` and `chat_completion`. + * If `provider` is not provided or set to `google` (Google Vertex AI), do not set `url` (or `streaming_url`). + * At least one of `url` or `streaming_url` must be provided for Google Model Garden endpoint usage. */ + url?: string + /** The URL for streaming `completion` and `chat_completion` requests to a Google Model Garden provider endpoint. + * If both `streaming_url` and `url` are provided, each is used for its respective mode. + * If `url` is not provided, `streaming_url` is also used for non-streaming `completion` requests. + * If `provider` is not provided or set to `google` (Google Vertex AI), do not set `streaming_url` (or `url`). + * At least one of `streaming_url` or `url` must be provided for Google Model Garden endpoint usage. */ + streaming_url?: string + /** The name of the location to use for the Google Vertex AI inference task. + * For Google Vertex AI, when `provider` is omitted or set to `google`, `location` is mandatory. + * For Google Model Garden's `completion` and `chat_completion` tasks, when `provider` is a supported non-`google` value, `location` is ignored. + * Refer to the Google documentation for the list of supported locations. */ + location?: string + /** The name of the model to use for the inference task. + * For Google Vertex AI, `model_id` is mandatory. + * For Google Model Garden's `completion` and `chat_completion` tasks, when `provider` is a supported non-`google` value, `model_id` is used by providers that require it and ignored otherwise. + * Refer to the Google documentation for the list of supported models for Google Vertex AI.
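+ * For example, a minimal Google Vertex AI configuration sketch (the location, model, and project identifiers are illustrative assumptions): + * ``` + * "service_settings": { + * "location": "us-central1", + * "model_id": "text-embedding-004", + * "project_id": "my-gcp-project", + * "service_account_json": "..." + * } + * ```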
*/ + model_id?: string + /** The name of the project to use for the Google Vertex AI inference task. + * For Google Vertex AI, `project_id` is mandatory. + * For Google Model Garden's `completion` and `chat_completion` tasks, when `provider` is a supported non-`google` value, `project_id` is ignored. */ + project_id?: string + /** This setting helps to minimize the number of rate limit errors returned from Google Vertex AI. + * By default, the `googlevertexai` service sets the number of requests allowed per minute to 30,000. */ + rate_limit?: InferenceRateLimitSetting + /** A valid service account in JSON format for the Google Vertex AI API. */ + service_account_json: string + /** For a `text_embedding` task, the number of dimensions the resulting output embeddings should have. + * By default, the model's standard output dimension is used. + * Refer to the Google documentation for more information. */ + dimensions?: integer +} + +export type InferenceGoogleVertexAIServiceType = 'googlevertexai' + +export interface InferenceGoogleVertexAITaskSettings { + /** For a `text_embedding` task, truncate inputs longer than the maximum token length automatically. */ + auto_truncate?: boolean + /** For a `rerank` task, the number of the top N documents that should be returned. */ + top_n?: integer + /** For a `completion` or `chat_completion` task, allows configuration of the thinking features for the model. + * Refer to the Google documentation for the allowable configurations for each model type. */ + thinking_config?: InferenceThinkingConfig + /** For `completion` and `chat_completion` tasks, specifies the `max_tokens` value for requests sent to the Google Model Garden `anthropic` provider. + * If `provider` is not set to `anthropic`, this field is ignored. + * If `max_tokens` is specified, it must be a positive integer. If not specified, the default value of 1024 is used. + * Anthropic models require `max_tokens` to be set for each request. Please refer to the Anthropic documentation for more information. */ + max_tokens?: integer +} + +export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding' | 'completion' | 'chat_completion' + +export interface InferenceHuggingFaceServiceSettings { + /** A valid access token for your HuggingFace account. + * You can create or find your access tokens on the HuggingFace settings page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** This setting helps to minimize the number of rate limit errors returned from Hugging Face. + * By default, the `hugging_face` service sets the number of requests allowed per minute to 3000 for all supported tasks. + * Hugging Face does not publish a universal rate limit; actual limits may vary. + * It is recommended to adjust this value based on the capacity and limits of your specific deployment environment. */ + rate_limit?: InferenceRateLimitSetting + /** The URL endpoint to use for the requests. + * For `completion` and `chat_completion` tasks, the deployed model must be compatible with the Hugging Face Chat Completion interface (see the linked external documentation for details). The endpoint URL for the request must include `/v1/chat/completions`.
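+ * For example, an illustrative placeholder URL (not a real endpoint): `https://my-endpoint.us-east-1.aws.endpoints.huggingface.cloud/v1/chat/completions`.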
+ * If the model supports the OpenAI Chat Completion schema, a toggle should appear in the interface. Enabling this toggle doesn't change any model behavior; it reveals the full endpoint URL needed (which should include `/v1/chat/completions`) when configuring the inference endpoint in Elasticsearch. If the model doesn't support this schema, the toggle may not be shown. */ + url: string + /** The name of the HuggingFace model to use for the inference task. + * For `completion` and `chat_completion` tasks, this field is optional but may be required for certain models, particularly when using serverless inference endpoints. + * For the `text_embedding` task, this field should not be included. Otherwise, the request will fail. */ + model_id?: string +} + +export type InferenceHuggingFaceServiceType = 'hugging_face' + +export interface InferenceHuggingFaceTaskSettings { + /** For a `rerank` task, return doc text within the results. */ + return_documents?: boolean + /** For a `rerank` task, the number of most relevant documents to return. + * It defaults to the number of documents. */ + top_n?: integer +} + +export type InferenceHuggingFaceTaskType = 'chat_completion' | 'completion' | 'rerank' | 'text_embedding' + +export interface InferenceInferenceChunkingSettings { + /** The maximum size of a chunk in words. + * This value cannot be lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). + * This value should not exceed the window size for the associated model. */ + max_chunk_size?: integer + /** The number of overlapping words for chunks. + * It is applicable only to a `word` chunking strategy. + * This value cannot be higher than half the `max_chunk_size` value. */ + overlap?: integer + /** The number of overlapping sentences for chunks. + * It is applicable only for a `sentence` chunking strategy. + * It can be either `1` or `0`. */ + sentence_overlap?: integer + /** Only applicable to the `recursive` strategy and required when using it. + * + * Sets a predefined list of separators in the saved chunking settings based on the selected text type. + * Values can be `markdown` or `plaintext`. + * + * Using this parameter is an alternative to manually specifying a custom `separators` list. */ + separator_group?: string + /** Only applicable to the `recursive` strategy and required when using it. + * + * A list of strings used as possible split points when chunking text. + * + * Each string can be a plain string or a regular expression (regex) pattern. + * The system tries each separator in order to split the text, starting from the first item in the list. + * + * After splitting, it attempts to recombine smaller pieces into larger chunks that stay within + * the `max_chunk_size` limit, to reduce the total number of chunks generated. */ + separators?: string[] + /** The chunking strategy: `sentence`, `word`, `none`, or `recursive`. + * + * * If `strategy` is set to `recursive`, you must also specify: + * + * - `max_chunk_size` + * - either `separators` or `separator_group` + * + * Learn more about different chunking strategies in the linked documentation. */ + strategy?: string +} + +export interface InferenceInferenceEndpoint { + /** The chunking configuration object. + * Applies only to the `sparse_embedding` and `text_embedding` task types. + * Not applicable to the `rerank`, `completion`, or `chat_completion` task types.
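+ * + * For example, a recursive chunking sketch (the values shown are illustrative): + * ``` + * "chunking_settings": { + * "strategy": "recursive", + * "max_chunk_size": 200, + * "separator_group": "markdown" + * } + * ```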
*/ + chunking_settings?: InferenceInferenceChunkingSettings + /** The service type */ + service: string + /** Settings specific to the service */ + service_settings: InferenceServiceSettings + /** Task settings specific to the service and task type */ + task_settings?: InferenceTaskSettings +} + +export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskType +} + +export interface InferenceInferenceEndpointInfoAi21 extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAi21 +} + +export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAlibabaCloudAI +} + +export interface InferenceInferenceEndpointInfoAmazonBedrock extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAmazonBedrock +} + +export interface InferenceInferenceEndpointInfoAmazonSageMaker extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAmazonSageMaker +} + +export interface InferenceInferenceEndpointInfoAnthropic extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAnthropic +} + +export interface InferenceInferenceEndpointInfoAzureAIStudio extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAzureAIStudio +} + +export interface InferenceInferenceEndpointInfoAzureOpenAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeAzureOpenAI +} + +export interface InferenceInferenceEndpointInfoCohere extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeCohere +} + +export interface InferenceInferenceEndpointInfoContextualAi extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeContextualAI +} + +export interface InferenceInferenceEndpointInfoCustom extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeCustom +} + +export interface InferenceInferenceEndpointInfoDeepSeek extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeDeepSeek +} + +export interface InferenceInferenceEndpointInfoELSER extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeELSER +} + +export interface InferenceInferenceEndpointInfoElasticsearch extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeElasticsearch +} + +export interface InferenceInferenceEndpointInfoGoogleAIStudio extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeGoogleAIStudio +} + +export interface 
InferenceInferenceEndpointInfoGoogleVertexAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeGoogleVertexAI +} + +export interface InferenceInferenceEndpointInfoHuggingFace extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeHuggingFace +} + +export interface InferenceInferenceEndpointInfoJinaAi extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeJinaAi +} + +export interface InferenceInferenceEndpointInfoLlama extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeLlama +} + +export interface InferenceInferenceEndpointInfoMistral extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeMistral +} + +export interface InferenceInferenceEndpointInfoOpenAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeOpenAI +} + +export interface InferenceInferenceEndpointInfoVoyageAI extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeVoyageAI +} + +export interface InferenceInferenceEndpointInfoWatsonx extends InferenceInferenceEndpoint { + /** The inference Id */ + inference_id: string + /** The task type */ + task_type: InferenceTaskTypeWatsonx +} + +export interface InferenceInferenceResult { + text_embedding_bytes?: InferenceTextEmbeddingByteResult[] + text_embedding_bits?: InferenceTextEmbeddingByteResult[] + text_embedding?: InferenceTextEmbeddingResult[] + sparse_embedding?: InferenceSparseEmbeddingResult[] + completion?: InferenceCompletionResult[] + rerank?: InferenceRankedDocument[] +} + +export interface InferenceJinaAIServiceSettings { + /** A valid API key of your JinaAI account. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The name of the model to use for the inference task. + * For a `rerank` task, it is required. + * For a `text_embedding` task, it is optional. */ + model_id?: string + /** This setting helps to minimize the number of rate limit errors returned from JinaAI. + * By default, the `jinaai` service sets the number of requests allowed per minute to 2000 for all task types. */ + rate_limit?: InferenceRateLimitSetting + /** For a `text_embedding` task, the similarity measure. One of cosine, dot_product, l2_norm. + * The default value varies with the embedding type. + * For example, a float embedding type uses a `dot_product` similarity measure by default. */ + similarity?: InferenceJinaAISimilarityType +} + +export type InferenceJinaAIServiceType = 'jinaai' + +export type InferenceJinaAISimilarityType = 'cosine' | 'dot_product' | 'l2_norm' + +export interface InferenceJinaAITaskSettings { + /** For a `rerank` task, return the doc text within the results.
*/ + return_documents?: boolean + /** For a `text_embedding` task, the task passed to the model. + * Valid values are: + * + * * `classification`: Use it for embeddings passed through a text classifier. + * * `clustering`: Use it for the embeddings run through a clustering algorithm. + * * `ingest`: Use it for storing document embeddings in a vector database. + * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. */ + task?: InferenceJinaAITextEmbeddingTask + /** For a `rerank` task, the number of most relevant documents to return. + * It defaults to the number of documents. + * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. */ + top_n?: integer +} + +export type InferenceJinaAITaskType = 'rerank' | 'text_embedding' + +export type InferenceJinaAITextEmbeddingTask = 'classification' | 'clustering' | 'ingest' | 'search' + +export interface InferenceLlamaServiceSettings { + /** The URL of the Llama stack endpoint. + * The URL must contain: + * * For the `text_embedding` task: `/v1/inference/embeddings`. + * * For `completion` and `chat_completion` tasks: `/v1/openai/v1/chat/completions`. */ + url: string + /** The name of the model to use for the inference task. + * Refer to the Llama downloading models documentation for different ways of getting a list of available models and downloading them. + * The service has been tested and confirmed to work with the following models: + * * For the `text_embedding` task: `all-MiniLM-L6-v2`. + * * For `completion` and `chat_completion` tasks: `llama3.2:3b`. */ + model_id: string + /** For a `text_embedding` task, the maximum number of tokens per input before chunking occurs. */ + max_input_tokens?: integer + /** For a `text_embedding` task, the similarity measure. One of cosine, dot_product, l2_norm. */ + similarity?: InferenceLlamaSimilarityType + /** This setting helps to minimize the number of rate limit errors returned from the Llama API. + * By default, the `llama` service sets the number of requests allowed per minute to 3000. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceLlamaServiceType = 'llama' + +export type InferenceLlamaSimilarityType = 'cosine' | 'dot_product' | 'l2_norm' + +export type InferenceLlamaTaskType = 'text_embedding' | 'completion' | 'chat_completion' + +export interface InferenceMessage { + /** The content of the message. + * + * String example: + * ``` + * { + * "content": "Some string" + * } + * ``` + * + * Object example: + * ``` + * { + * "content": [ + * { + * "text": "Some text", + * "type": "text" + * } + * ] + * } + * ``` */ + content?: InferenceMessageContent + /** The role of the message author. Valid values are `user`, `assistant`, `system`, and `tool`. */ + role: string + /** Only for `tool` role messages. The tool call that this message is responding to. */ + tool_call_id?: Id + /** Only for `assistant` role messages. The tool calls generated by the model. If it's specified, the `content` field is optional.
+ * Example: + * ``` + * { + * "tool_calls": [ + * { + * "id": "call_KcAjWtAww20AihPHphUh46Gd", + * "type": "function", + * "function": { + * "name": "get_current_weather", + * "arguments": "{\"location\":\"Boston, MA\"}" + * } + * } + * ] + * } + * ``` */ + tool_calls?: InferenceToolCall[] +} + +export type InferenceMessageContent = string | InferenceContentObject[] + +export interface InferenceMistralServiceSettings { + /** A valid API key of your Mistral account. + * You can find your Mistral API keys, or create a new one, on the API Keys page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The maximum number of tokens per input before chunking occurs. */ + max_input_tokens?: integer + /** The name of the model to use for the inference task. + * Refer to the Mistral models documentation for the list of available models. */ + model: string + /** This setting helps to minimize the number of rate limit errors returned from the Mistral API. + * By default, the `mistral` service sets the number of requests allowed per minute to 240. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceMistralServiceType = 'mistral' + +export type InferenceMistralTaskType = 'text_embedding' | 'completion' | 'chat_completion' + +export interface InferenceOpenAIServiceSettings { + /** A valid API key of your OpenAI account. + * You can find your OpenAI API keys in your OpenAI account under the API keys section. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The number of dimensions the resulting output embeddings should have. + * It is supported only in `text-embedding-3` and later models. + * If it is not set, the OpenAI-defined default for the model is used. */ + dimensions?: integer + /** The name of the model to use for the inference task. + * Refer to the OpenAI documentation for the list of available text embedding models. */ + model_id: string + /** The unique identifier for your organization. + * You can find the Organization ID in your OpenAI account under *Settings > Organizations*. */ + organization_id?: string + /** This setting helps to minimize the number of rate limit errors returned from OpenAI. + * The `openai` service sets a default number of requests allowed per minute depending on the task type. + * For `text_embedding`, it is set to `3000`. + * For `completion`, it is set to `500`. */ + rate_limit?: InferenceRateLimitSetting + /** The URL endpoint to use for the requests. + * It can be changed for testing purposes. */ + url?: string +} + +export type InferenceOpenAIServiceType = 'openai' + +export interface InferenceOpenAITaskSettings { + /** For a `completion` or `text_embedding` task, specify the user issuing the request. + * This information can be used for abuse detection. */ + user?: string + /** Specifies custom HTTP header parameters.
+ * For example: + * ``` + * "headers":{ + * "Custom-Header": "Some-Value", + * "Another-Custom-Header": "Another-Value" + * } + * ``` */ + headers?: any +} + +export type InferenceOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding' + +export interface InferenceRankedDocument { + index: integer + relevance_score: float + text?: string +} + +export interface InferenceRateLimitSetting { + /** The number of requests allowed per minute. + * By default, the number of requests allowed per minute is set by each service as follows: + * + * * `alibabacloud-ai-search` service: `1000` + * * `anthropic` service: `50` + * * `azureaistudio` service: `240` + * * `azureopenai` service and task type `text_embedding`: `1440` + * * `azureopenai` service and task type `completion`: `120` + * * `cohere` service: `10000` + * * `contextualai` service: `1000` + * * `elastic` service and task type `chat_completion`: `240` + * * `googleaistudio` service: `360` + * * `googlevertexai` service: `30000` + * * `hugging_face` service: `3000` + * * `jinaai` service: `2000` + * * `llama` service: `3000` + * * `mistral` service: `240` + * * `openai` service and task type `text_embedding`: `3000` + * * `openai` service and task type `completion`: `500` + * * `voyageai` service: `2000` + * * `watsonxai` service: `120` */ + requests_per_minute?: integer +} + +export interface InferenceRequestChatCompletion { + /** A list of objects representing the conversation. + * Requests should generally only add new messages from the user (role `user`). + * The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. */ + messages: InferenceMessage[] + /** The ID of the model to use. By default, the model ID is set to the value included when creating the inference endpoint. */ + model?: string + /** The upper bound limit for the number of tokens that can be generated for a completion request. */ + max_completion_tokens?: long + /** A sequence of strings to control when the model should stop generating additional tokens. */ + stop?: string[] + /** The sampling temperature to use. */ + temperature?: float + /** Controls which tool is called by the model. + * String representation: One of `auto`, `none`, or `required`. `auto` allows the model to choose between calling tools and generating a message. `none` causes the model to not call any tools. `required` forces the model to call one or more tools. + * Example (object representation): + * ``` + * { + * "tool_choice": { + * "type": "function", + * "function": { + * "name": "get_current_weather" + * } + * } + * } + * ``` */ + tool_choice?: InferenceCompletionToolType + /** A list of tools that the model can call. + * Example: + * ``` + * { + * "tools": [ + * { + * "type": "function", + * "function": { + * "name": "get_price_of_item", + * "description": "Get the current price of an item", + * "parameters": { + * "type": "object", + * "properties": { + * "item": { + * "id": "12345" + * }, + * "unit": { + * "type": "currency" + * } + * } + * } + * } + * } + * ] + * } + * ``` */ + tools?: InferenceCompletionTool[] + /** Nucleus sampling, an alternative to sampling with temperature.
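+ * For example, `top_p: 0.1` restricts sampling to the smallest set of tokens whose cumulative probability mass is at least 10%. It is generally advisable to adjust `top_p` or `temperature`, but not both.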
*/ + top_p?: float +} + +export interface InferenceRerankedInferenceResult { + rerank: InferenceRankedDocument[] +} + +export type InferenceServiceSettings = any + +export interface InferenceSparseEmbeddingInferenceResult { + sparse_embedding: InferenceSparseEmbeddingResult[] +} + +export interface InferenceSparseEmbeddingResult { + embedding: InferenceSparseVector +} + +export type InferenceSparseVector = Record<string, float> + +export type InferenceTaskSettings = any + +export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion' + +export type InferenceTaskTypeAi21 = 'completion' | 'chat_completion' + +export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding' + +export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion' + +export type InferenceTaskTypeAmazonSageMaker = 'text_embedding' | 'completion' | 'chat_completion' | 'sparse_embedding' | 'rerank' + +export type InferenceTaskTypeAnthropic = 'completion' + +export type InferenceTaskTypeAzureAIStudio = 'text_embedding' | 'completion' | 'rerank' + +export type InferenceTaskTypeAzureOpenAI = 'text_embedding' | 'completion' + +export type InferenceTaskTypeCohere = 'text_embedding' | 'rerank' | 'completion' + +export type InferenceTaskTypeContextualAI = 'rerank' + +export type InferenceTaskTypeCustom = 'text_embedding' | 'sparse_embedding' | 'rerank' | 'completion' + +export type InferenceTaskTypeDeepSeek = 'completion' | 'chat_completion' + +export type InferenceTaskTypeELSER = 'sparse_embedding' + +export type InferenceTaskTypeElasticsearch = 'sparse_embedding' | 'text_embedding' | 'rerank' + +export type InferenceTaskTypeGoogleAIStudio = 'text_embedding' | 'completion' + +export type InferenceTaskTypeGoogleVertexAI = 'chat_completion' | 'completion' | 'text_embedding' | 'rerank' + +export type InferenceTaskTypeHuggingFace = 'chat_completion' | 'completion' | 'rerank' | 'text_embedding' + +export type InferenceTaskTypeJinaAi = 'text_embedding' | 'rerank' + +export type InferenceTaskTypeLlama = 'text_embedding' | 'chat_completion' | 'completion' + +export type InferenceTaskTypeMistral = 'text_embedding' | 'chat_completion' | 'completion' + +export type InferenceTaskTypeOpenAI = 'text_embedding' | 'chat_completion' | 'completion' + +export type InferenceTaskTypeVoyageAI = 'text_embedding' | 'rerank' + +export type InferenceTaskTypeWatsonx = 'text_embedding' | 'chat_completion' | 'completion' + +export interface InferenceTextEmbeddingByteResult { + embedding: InferenceDenseByteVector +} + +export interface InferenceTextEmbeddingInferenceResult { + text_embedding_bytes?: InferenceTextEmbeddingByteResult[] + text_embedding_bits?: InferenceTextEmbeddingByteResult[] + text_embedding?: InferenceTextEmbeddingResult[] +} + +export interface InferenceTextEmbeddingResult { + embedding: InferenceDenseVector +} + +export interface InferenceThinkingConfig { + /** Indicates the desired thinking budget in tokens. */ + thinking_budget?: integer +} + +export interface InferenceToolCall { + /** The identifier of the tool call. */ + id: Id + /** The function that the model called. */ + function: InferenceToolCallFunction + /** The type of the tool call. */ + type: string +} + +export interface InferenceToolCallFunction { + /** The arguments to call the function with in JSON format. */ + arguments: string + /** The name of the function to call.
*/ + name: string +} + +export interface InferenceVoyageAIServiceSettings { + /** The number of dimensions for the resulting output embeddings. + * This setting maps to `output_dimension` in the VoyageAI documentation. + * Only for the `text_embedding` task type. */ + dimensions?: integer + /** The name of the model to use for the inference task. + * Refer to the VoyageAI documentation for the list of available text embedding and rerank models. */ + model_id: string + /** This setting helps to minimize the number of rate limit errors returned from VoyageAI. + * The `voyageai` service sets a default number of requests allowed per minute depending on the task type. + * For both `text_embedding` and `rerank`, it is set to `2000`. */ + rate_limit?: InferenceRateLimitSetting + /** The data type for the embeddings to be returned. + * This setting maps to `output_dtype` in the VoyageAI documentation. + * Permitted values: float, int8, bit. + * `int8` is a synonym of `byte` in the VoyageAI documentation. + * `bit` is a synonym of `binary` in the VoyageAI documentation. + * Only for the `text_embedding` task type. */ + embedding_type?: float +} + +export type InferenceVoyageAIServiceType = 'voyageai' + +export interface InferenceVoyageAITaskSettings { + /** Type of the input text. + * Permitted values: `ingest` (maps to `document` in the VoyageAI documentation), `search` (maps to `query` in the VoyageAI documentation). + * Only for the `text_embedding` task type. */ + input_type?: string + /** Whether to return the source documents in the response. + * Only for the `rerank` task type. */ + return_documents?: boolean + /** The number of most relevant documents to return. + * If not specified, the reranking results of all documents will be returned. + * Only for the `rerank` task type. */ + top_k?: integer + /** Whether to truncate the input texts to fit within the context length. */ + truncation?: boolean +} + +export type InferenceVoyageAITaskType = 'text_embedding' | 'rerank' + +export interface InferenceWatsonxServiceSettings { + /** A valid API key of your Watsonx account. + * You can find your Watsonx API keys, or create a new one, on the API keys page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** A version parameter that takes a version date in the format of `YYYY-MM-DD`. + * For the active version date parameters, refer to the Watsonx documentation. */ + api_version: string + /** The name of the model to use for the inference task. + * Refer to the IBM Embedding Models section in the Watsonx documentation for the list of available text embedding models. + * Refer to the IBM library - Foundation models in Watsonx.ai. */ + model_id: string + /** The identifier of the IBM Cloud project to use for the inference task. */ + project_id: string + /** This setting helps to minimize the number of rate limit errors returned from Watsonx. + * By default, the `watsonxai` service sets the number of requests allowed per minute to 120. */ + rate_limit?: InferenceRateLimitSetting + /** The URL of the inference endpoint that you created on Watsonx.
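+ * For example, a region-specific base URL such as `https://us-south.ml.cloud.ibm.com` (illustrative; use the URL shown for your own Watsonx deployment).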
*/ + url: string +} + +export type InferenceWatsonxServiceType = 'watsonxai' + +export type InferenceWatsonxTaskType = 'text_embedding' | 'chat_completion' | 'completion' + +export interface InferenceChatCompletionUnifiedRequest extends RequestBase { + /** The inference Id */ + inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + chat_completion_request?: InferenceRequestChatCompletion + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, chat_completion_request?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, chat_completion_request?: never } +} + +export type InferenceChatCompletionUnifiedResponse = StreamResult + +export interface InferenceCompletionRequest extends RequestBase { + /** The inference Id */ + inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** Inference input. + * Either a string or an array of strings. */ + input: string | string[] + /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } +} + +export type InferenceCompletionResponse = InferenceCompletionInferenceResult + +export interface InferenceDeleteRequest extends RequestBase { + /** The task type */ + task_type?: InferenceTaskType + /** The inference identifier. */ + inference_id: Id + /** When true, checks the semantic_text fields and inference processors that reference the endpoint and returns them in a list, but does not delete the endpoint. */ + dry_run?: boolean + /** When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. */ + force?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never } +} + +export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult + +export interface InferenceGetRequest extends RequestBase { + /** The task type */ + task_type?: InferenceTaskType + /** The inference Id */ + inference_id?: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never } +} + +export interface InferenceGetResponse { + endpoints: InferenceInferenceEndpointInfo[] +} + +export interface InferenceInferenceRequest extends RequestBase { + /** The type of inference task that the model performs. */ + task_type?: InferenceTaskType + /** The unique identifier for the inference endpoint. */ + inference_id: Id + /** The amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** The query input, which is required only for the `rerank` task. + * It is not required for other tasks. */ + query?: string + /** The text on which you want to perform the inference task. + * It can be a single string or an array. + * + * > info + * > Inference endpoints for the `completion` task type currently only support a single string as input. */ + input: string | string[] + /** Specifies the input data type for the text embedding model. The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` task type. Possible values include: + * * `SEARCH` + * * `INGEST` + * * `CLASSIFICATION` + * * `CLUSTERING` + * Not all services support all values. Unsupported values will trigger a validation exception. + * Accepted values depend on the configured inference service; refer to the relevant service-specific documentation for more information. + * + * > info + * > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. */ + input_type?: string + /** Task settings for the individual inference request. + * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, input_type?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, input_type?: never, task_settings?: never } +} + +export type InferenceInferenceResponse = InferenceInferenceResult + +export interface InferencePutRequest extends RequestBase { + /** The task type. Refer to the integration list in the API description for the available task types. */ + task_type?: InferenceTaskType + /** The inference Id */ + inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + inference_config?: InferenceInferenceEndpoint + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, inference_config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, inference_config?: never } +} + +export type InferencePutResponse = InferenceInferenceEndpointInfo + +export interface InferencePutAi21Request extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceAi21TaskType + /** The unique identifier of the inference endpoint.
*/ + ai21_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The type of service supported for the specified task type. In this case, `ai21`. */ + service: InferenceAi21ServiceType + /** Settings used to install the inference model. These settings are specific to the `ai21` service. */ + service_settings: InferenceAi21ServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, ai21_inference_id?: never, timeout?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, ai21_inference_id?: never, timeout?: never, service?: never, service_settings?: never } +} + +export type InferencePutAi21Response = InferenceInferenceEndpointInfoAi21 + +export interface InferencePutAlibabacloudRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceAlibabaCloudTaskType + /** The unique identifier of the inference endpoint. */ + alibabacloud_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `sparse_embedding` or `text_embedding` task types. + * Not applicable to the `rerank` or `completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. */ + service: InferenceAlibabaCloudServiceType + /** Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. */ + service_settings: InferenceAlibabaCloudServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAlibabaCloudTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfoAlibabaCloudAI + +export interface InferencePutAmazonbedrockRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceAmazonBedrockTaskType + /** The unique identifier of the inference endpoint. */ + amazonbedrock_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` task type. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `amazonbedrock`. */ + service: InferenceAmazonBedrockServiceType + /** Settings used to install the inference model. 
These settings are specific to the `amazonbedrock` service. */ + service_settings: InferenceAmazonBedrockServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAmazonBedrockTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfoAmazonBedrock + +export interface InferencePutAmazonsagemakerRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceTaskTypeAmazonSageMaker + /** The unique identifier of the inference endpoint. */ + amazonsagemaker_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `sparse_embedding` or `text_embedding` task types. + * Not applicable to the `rerank`, `completion`, or `chat_completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `amazon_sagemaker`. */ + service: InferenceAmazonSageMakerServiceType + /** Settings used to install the inference model. + * These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified. */ + service_settings: InferenceAmazonSageMakerServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type and `service_settings.api` you specified. */ + task_settings?: InferenceAmazonSageMakerTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, amazonsagemaker_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, amazonsagemaker_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAmazonsagemakerResponse = InferenceInferenceEndpointInfoAmazonSageMaker + +export interface InferencePutAnthropicRequest extends RequestBase { + /** The task type. + * The only valid task type for the model to perform is `completion`. */ + task_type: InferenceAnthropicTaskType + /** The unique identifier of the inference endpoint. */ + anthropic_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The type of service supported for the specified task type. In this case, `anthropic`. */ + service: InferenceAnthropicServiceType + /** Settings used to install the inference model. These settings are specific to the `anthropic` service. 
*/ + service_settings: InferenceAnthropicServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAnthropicTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic + +export interface InferencePutAzureaistudioRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceAzureAiStudioTaskType + /** The unique identifier of the inference endpoint. */ + azureaistudio_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank` or `completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `azureaistudio`. */ + service: InferenceAzureAiStudioServiceType + /** Settings used to install the inference model. These settings are specific to the `azureaistudio` service. */ + service_settings: InferenceAzureAiStudioServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAzureAiStudioTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfoAzureAIStudio + +export interface InferencePutAzureopenaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. + * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ + task_type: InferenceAzureOpenAITaskType + /** The unique identifier of the inference endpoint. */ + azureopenai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` task type. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `azureopenai`. */ + service: InferenceAzureOpenAIServiceType + /** Settings used to install the inference model. These settings are specific to the `azureopenai` service. 
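+ * @example A hedged sketch (the `service_settings` keys and the API version value are
+ * assumptions based on the Azure OpenAI service docs, not confirmed by this change):
+ * await client.inference.putAzureopenai({
+ *   task_type: 'text_embedding',
+ *   azureopenai_inference_id: 'azure-embeddings',
+ *   service: 'azureopenai',
+ *   service_settings: { api_key: '<api-key>', resource_name: '<resource>', deployment_id: '<deployment>', api_version: '2024-02-01' }
+ * })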
*/ + service_settings: InferenceAzureOpenAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAzureOpenAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfoAzureOpenAI + +export interface InferencePutCohereRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceCohereTaskType + /** The unique identifier of the inference endpoint. */ + cohere_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank` or `completion` task type. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `cohere`. */ + service: InferenceCohereServiceType + /** Settings used to install the inference model. + * These settings are specific to the `cohere` service. */ + service_settings: InferenceCohereServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceCohereTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutCohereResponse = InferenceInferenceEndpointInfoCohere + +export interface InferencePutContextualaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceTaskTypeContextualAI + /** The unique identifier of the inference endpoint. */ + contextualai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The type of service supported for the specified task type. In this case, `contextualai`. */ + service: InferenceContextualAIServiceType + /** Settings used to install the inference model. These settings are specific to the `contextualai` service. */ + service_settings: InferenceContextualAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceContextualAITaskSettings + /** All values in `body` will be added to the request body. 
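+ * @example Sketch of the catch-all in use: fields the client does not model yet can be
+ * sent through `body` (`some_future_option` is a hypothetical key, and the
+ * `service_settings` keys are assumptions from the Elasticsearch docs):
+ * await client.inference.putContextualai({
+ *   task_type: 'rerank',
+ *   contextualai_inference_id: 'ctx-rerank',
+ *   service: 'contextualai',
+ *   service_settings: { api_key: '<api-key>', model_id: '<model>' },
+ *   body: { some_future_option: true }
+ * })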
*/ + body?: string | { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutContextualaiResponse = InferenceInferenceEndpointInfoContextualAi + +export interface InferencePutCustomRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceCustomTaskType + /** The unique identifier of the inference endpoint. */ + custom_inference_id: Id + /** The chunking configuration object. + * Applies only to the `sparse_embedding` or `text_embedding` task types. + * Not applicable to the `rerank` or `completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `custom`. */ + service: InferenceCustomServiceType + /** Settings used to install the inference model. + * These settings are specific to the `custom` service. */ + service_settings: InferenceCustomServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceCustomTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, custom_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, custom_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutCustomResponse = InferenceInferenceEndpointInfoCustom + +export interface InferencePutDeepseekRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceTaskTypeDeepSeek + /** The unique identifier of the inference endpoint. */ + deepseek_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The type of service supported for the specified task type. In this case, `deepseek`. */ + service: InferenceDeepSeekServiceType + /** Settings used to install the inference model. + * These settings are specific to the `deepseek` service. */ + service_settings: InferenceDeepSeekServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, service?: never, service_settings?: never } +} + +export type InferencePutDeepseekResponse = InferenceInferenceEndpointInfoDeepSeek + +export interface InferencePutElasticsearchRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceElasticsearchTaskType + /** The unique identifier of the inference endpoint. 
+ * It must not match the `model_id`. */ + elasticsearch_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `sparse_embedding` and `text_embedding` task types. + * Not applicable to the `rerank` task type. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `elasticsearch`. */ + service: InferenceElasticsearchServiceType + /** Settings used to install the inference model. These settings are specific to the `elasticsearch` service. */ + service_settings: InferenceElasticsearchServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceElasticsearchTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfoElasticsearch + +export interface InferencePutElserRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceElserTaskType + /** The unique identifier of the inference endpoint. */ + elser_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Note that for ELSER endpoints, the `max_chunk_size` may not exceed `300`. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `elser`. */ + service: InferenceElserServiceType + /** Settings used to install the inference model. These settings are specific to the `elser` service. */ + service_settings: InferenceElserServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, elser_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutElserResponse = InferenceInferenceEndpointInfoELSER + +export interface InferencePutGoogleaistudioRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceGoogleAiStudioTaskType + /** The unique identifier of the inference endpoint. */ + googleaistudio_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` task type.
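+ * @example Sketch of a chunking configuration; the keys follow the
+ * `InferenceInferenceChunkingSettings` shape described in the Elasticsearch docs and
+ * should be treated as assumptions here:
+ * chunking_settings: { strategy: 'sentence', max_chunk_size: 250, sentence_overlap: 1 }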
*/ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `googleaistudio`. */ + service: InferenceGoogleAiServiceType + /** Settings used to install the inference model. These settings are specific to the `googleaistudio` service. */ + service_settings: InferenceGoogleAiStudioServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfoGoogleAIStudio + +export interface InferencePutGooglevertexaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceGoogleVertexAITaskType + /** The unique identifier of the inference endpoint. */ + googlevertexai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank`, `completion`, or `chat_completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `googlevertexai`. */ + service: InferenceGoogleVertexAIServiceType + /** Settings used to install the inference model. These settings are specific to the `googlevertexai` service. */ + service_settings: InferenceGoogleVertexAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceGoogleVertexAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfoGoogleVertexAI + +export interface InferencePutHuggingFaceRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceHuggingFaceTaskType + /** The unique identifier of the inference endpoint. */ + huggingface_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank`, `completion`, or `chat_completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `hugging_face`. 
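+ * @example Sketch of a full request (the `service_settings` keys are assumptions from
+ * the Hugging Face service docs):
+ * await client.inference.putHuggingFace({
+ *   task_type: 'text_embedding',
+ *   huggingface_inference_id: 'hf-embeddings',
+ *   service: 'hugging_face',
+ *   service_settings: { api_key: '<hf-token>', url: 'https://<endpoint>.endpoints.huggingface.cloud' }
+ * })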
*/ + service: InferenceHuggingFaceServiceType + /** Settings used to install the inference model. These settings are specific to the `hugging_face` service. */ + service_settings: InferenceHuggingFaceServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceHuggingFaceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfoHuggingFace + +export interface InferencePutJinaaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceJinaAITaskType + /** The unique identifier of the inference endpoint. */ + jinaai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank` task type. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `jinaai`. */ + service: InferenceJinaAIServiceType + /** Settings used to install the inference model. These settings are specific to the `jinaai` service. */ + service_settings: InferenceJinaAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceJinaAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfoJinaAi + +export interface InferencePutLlamaRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceLlamaTaskType + /** The unique identifier of the inference endpoint. */ + llama_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` or `chat_completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `llama`. */ + service: InferenceLlamaServiceType + /** Settings used to install the inference model. These settings are specific to the `llama` service. 
*/ + service_settings: InferenceLlamaServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, llama_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, llama_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutLlamaResponse = InferenceInferenceEndpointInfoLlama + +export interface InferencePutMistralRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceMistralTaskType + /** The unique identifier of the inference endpoint. */ + mistral_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` or `chat_completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `mistral`. */ + service: InferenceMistralServiceType + /** Settings used to install the inference model. These settings are specific to the `mistral` service. */ + service_settings: InferenceMistralServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutMistralResponse = InferenceInferenceEndpointInfoMistral + +export interface InferencePutOpenaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. + * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ + task_type: InferenceOpenAITaskType + /** The unique identifier of the inference endpoint. */ + openai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` or `chat_completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `openai`. */ + service: InferenceOpenAIServiceType + /** Settings used to install the inference model. These settings are specific to the `openai` service. */ + service_settings: InferenceOpenAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceOpenAITaskSettings + /** All values in `body` will be added to the request body. 
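+ * @example For orientation, a sketch of a complete request (the model name and
+ * `service_settings` keys are assumptions from the OpenAI service docs):
+ * await client.inference.putOpenai({
+ *   task_type: 'text_embedding',
+ *   openai_inference_id: 'openai-embeddings',
+ *   service: 'openai',
+ *   service_settings: { api_key: '<api-key>', model_id: 'text-embedding-3-small' }
+ * })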
*/ + body?: string | { [key: string]: any } & { task_type?: never, openai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfoOpenAI + +export interface InferencePutVoyageaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceVoyageAITaskType + /** The unique identifier of the inference endpoint. */ + voyageai_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank` task type. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `voyageai`. */ + service: InferenceVoyageAIServiceType + /** Settings used to install the inference model. These settings are specific to the `voyageai` service. */ + service_settings: InferenceVoyageAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceVoyageAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfoVoyageAI + +export interface InferencePutWatsonxRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceWatsonxTaskType + /** The unique identifier of the inference endpoint. */ + watsonx_inference_id: Id + /** Specifies the amount of time to wait for the inference endpoint to be created. */ + timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` or `chat_completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `watsonxai`. */ + service: InferenceWatsonxServiceType + /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */ + service_settings: InferenceWatsonxServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. 
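+ * @example Sketch: extra query parameters ride along via this catch-all, for example
+ * `querystring: { pretty: true }` to pretty-print the JSON response.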
*/ + querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx + +export interface InferenceRerankRequest extends RequestBase { + /** The unique identifier for the inference endpoint. */ + inference_id: Id + /** The amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** Query input. */ + query: string + /** The documents to rank. */ + input: string[] + /** Include the document text in the response. */ + return_documents?: boolean + /** Limit the response to the top N documents. */ + top_n?: integer + /** Task settings for the individual inference request. + * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, return_documents?: never, top_n?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, return_documents?: never, top_n?: never, task_settings?: never } +} + +export type InferenceRerankResponse = InferenceRerankedInferenceResult + +export interface InferenceSparseEmbeddingRequest extends RequestBase { + /** The inference ID. */ + inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** Inference input. + * Either a string or an array of strings. */ + input: string | string[] + /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } +} + +export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult + +export interface InferenceStreamCompletionRequest extends RequestBase { + /** The unique identifier for the inference endpoint. */ + inference_id: Id + /** The amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** The text on which you want to perform the inference task. + * It can be a single string or an array. + * + * NOTE: Inference endpoints for the completion task type currently only support a single string as input. */ + input: string | string[] + /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body.
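+ * @example Sketch of issuing a streaming completion, assuming an instantiated `client`
+ * (how the returned stream is consumed depends on your transport configuration):
+ * const stream = await client.inference.streamCompletion({
+ *   inference_id: 'my-chat-endpoint',
+ *   input: 'Say hello'
+ * })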
*/ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } +} + +export type InferenceStreamCompletionResponse = StreamResult + +export interface InferenceTextEmbeddingRequest extends RequestBase { + /** The inference ID. */ + inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** Inference input. + * Either a string or an array of strings. */ + input: string | string[] + /** The input data type for the text embedding model. Possible values include: + * * `SEARCH` + * * `INGEST` + * * `CLASSIFICATION` + * * `CLUSTERING` + * Not all services support all values. Unsupported values will trigger a validation exception. + * Accepted values depend on the configured inference service; refer to the relevant service-specific documentation for more info. + * + * NOTE: The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. */ + input_type?: string + /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, input_type?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, input_type?: never, task_settings?: never } +} + +export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult + +export interface InferenceUpdateRequest extends RequestBase { + /** The unique identifier of the inference endpoint. */ + inference_id: Id + /** The type of inference task that the model performs. */ + task_type?: InferenceTaskType + inference_config?: InferenceInferenceEndpoint + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, task_type?: never, inference_config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, task_type?: never, inference_config?: never } +} + +export type InferenceUpdateResponse = InferenceInferenceEndpointInfo + +export interface IngestAppendProcessor extends IngestProcessorBase { + /** The field to be appended to. + * Supports template snippets. */ + field: Field + /** The value to be appended. Supports template snippets. May specify only one of `value` or `copy_from`. */ + value?: any | any[] + /** The origin field which will be appended to `field`. Cannot be set if `value` is also specified. */ + copy_from?: Field + /** If `false`, the processor does not append values already present in the field. */ + allow_duplicates?: boolean +} + +export interface IngestAttachmentProcessor extends IngestProcessorBase { + /** The field to get the base64-encoded field from. */ + field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document.
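+ * @example Sketch of an attachment processor that tolerates documents without the field:
+ * { attachment: { field: 'data', ignore_missing: true, remove_binary: true } }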
*/ + ignore_missing?: boolean + /** The number of chars being used for extraction, to prevent huge fields. + * Use `-1` for no limit. */ + indexed_chars?: long + /** Field name from which you can overwrite the number of chars being used for extraction. */ + indexed_chars_field?: Field + /** Array of properties to select to be stored. + * Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language`. */ + properties?: string[] + /** The field that will hold the attachment information. */ + target_field?: Field + /** If `true`, the binary field will be removed from the document. */ + remove_binary?: boolean + /** Field containing the name of the resource to decode. + * If specified, the processor passes this resource name to the underlying Tika library to enable Resource Name Based Detection. */ + resource_name?: string +} + +export interface IngestBytesProcessor extends IngestProcessorBase { + /** The field to convert. */ + field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ + target_field?: Field +} + +export interface IngestCircleProcessor extends IngestProcessorBase { + /** The difference between the resulting inscribed distance from center to side and the circle’s radius (measured in meters for `geo_shape`, unit-less for `shape`). */ + error_distance: double + /** The field to interpret as a circle. Either a string in WKT format or a map for GeoJSON. */ + field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** Which field mapping type is to be used when processing the circle: `geo_shape` or `shape`. */ + shape_type: IngestShapeType + /** The field to assign the polygon shape to. + * By default, the field is updated in-place. */ + target_field?: Field +} + +export interface IngestCommunityIDProcessor extends IngestProcessorBase { + /** Field containing the source IP address. */ + source_ip?: Field + /** Field containing the source port. */ + source_port?: Field + /** Field containing the destination IP address. */ + destination_ip?: Field + /** Field containing the destination port. */ + destination_port?: Field + /** Field containing the IANA number. */ + iana_number?: Field + /** Field containing the ICMP type. */ + icmp_type?: Field + /** Field containing the ICMP code. */ + icmp_code?: Field + /** Field containing the transport protocol name or number. Used only when the + * iana_number field is not present. The following protocol names are currently + * supported: eigrp, gre, icmp, icmpv6, igmp, ipv6-icmp, ospf, pim, sctp, tcp, udp */ + transport?: Field + /** Output field for the community ID. */ + target_field?: Field + /** Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The + * seed can prevent hash collisions between network domains, such as a staging + * and production network that use the same addressing scheme. */ + seed?: integer + /** If true and any required fields are missing, the processor quietly exits + * without modifying the document. */ + ignore_missing?: boolean +} + +export interface IngestConvertProcessor extends IngestProcessorBase { + /** The field whose value is to be converted.
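+ * @example Sketch of a convert processor turning a string status code into an integer
+ * (`http.response.status_code` is a placeholder field name):
+ * { convert: { field: 'http.response.status_code', type: 'integer', ignore_missing: true } }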
*/ + field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the `field` is updated in-place. */ + target_field?: Field + /** The type to convert the existing value to. */ + type: IngestConvertType +} + +export type IngestConvertType = 'integer' | 'long' | 'double' | 'float' | 'boolean' | 'ip' | 'string' | 'auto' + +export interface IngestCsvProcessor extends IngestProcessorBase { + /** Value used to fill empty fields. + * Empty fields are skipped if this is not provided. + * An empty field is one with no value (2 consecutive separators) or empty quotes (`""`). */ + empty_value?: any + /** The field to extract data from. */ + field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** Quote used in CSV; must be a single-character string. */ + quote?: string + /** Separator used in CSV; must be a single-character string. */ + separator?: string + /** The array of fields to assign extracted values to. */ + target_fields: Fields + /** Trim whitespaces in unquoted fields. */ + trim?: boolean +} + +export interface IngestDatabaseConfiguration { + /** The provider-assigned name of the IP geolocation database to download. */ + name: Name + maxmind?: IngestMaxmind + ipinfo?: IngestIpinfo +} + +export interface IngestDatabaseConfigurationFull { + web?: IngestWeb + local?: IngestLocal + /** The provider-assigned name of the IP geolocation database to download. */ + name: Name + maxmind?: IngestMaxmind + ipinfo?: IngestIpinfo +} + +export interface IngestDateIndexNameProcessor extends IngestProcessorBase { + /** An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. + * Can be a Java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */ + date_formats?: string[] + /** How to round the date when formatting the date into the index name. Valid values are: + * `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). + * Supports template snippets. */ + date_rounding: string + /** The field to get the date or timestamp from. */ + field: Field + /** The format to be used when printing the parsed date into the index name. + * A valid Java time pattern is expected here. + * Supports template snippets. */ + index_name_format?: string + /** A prefix of the index name to be prepended before the printed date. + * Supports template snippets. */ + index_name_prefix?: string + /** The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. */ + locale?: string + /** The timezone to use when parsing the date and when date math resolves expressions into concrete index names. */ + timezone?: string +} + +export interface IngestDateProcessor extends IngestProcessorBase { + /** The field to get the date from. */ + field: Field + /** An array of the expected date formats. + * Can be a Java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */ + formats: string[] + /** The locale to use when parsing the date, relevant when parsing month names or week days. + * Supports template snippets. */ + locale?: string + /** The field that will hold the parsed date. */ + target_field?: Field + /** The timezone to use when parsing the date.
+ * Supports template snippets. */ + timezone?: string + /** The format to use when writing the date to `target_field`. Must be a valid + * Java time pattern. */ + output_format?: string +} + +export interface IngestDissectProcessor extends IngestProcessorBase { + /** The character(s) that separate the appended fields. */ + append_separator?: string + /** The field to dissect. */ + field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The pattern to apply to the field. */ + pattern: string +} + +export interface IngestDocument { + /** Unique identifier for the document. + * This ID must be unique within the `_index`. */ + _id?: Id + /** Name of the index containing the document. */ + _index?: IndexName + /** JSON body for the document. */ + _source: any +} + +export interface IngestDocumentSimulationKeys { + /** Unique identifier for the document. This ID must be unique within the `_index`. */ + _id: Id + /** Name of the index containing the document. */ + _index: IndexName + _ingest: IngestIngest + /** Value used to send the document to a specific primary shard. */ + _routing?: string + /** JSON body for the document. */ + _source: Record<string, any> + _version?: SpecUtilsStringified<VersionNumber> + _version_type?: VersionType +} +export type IngestDocumentSimulation = IngestDocumentSimulationKeys +& { [property: string]: string | Id | IndexName | IngestIngest | Record<string, any> | SpecUtilsStringified<VersionNumber> | VersionType } + +export interface IngestDotExpanderProcessor extends IngestProcessorBase { + /** The field to expand into an object field. + * If set to `*`, all top-level fields will be expanded. */ + field: Field + /** Controls the behavior when there is already an existing nested object that conflicts with the expanded field. + * When `false`, the processor will merge conflicts by combining the old and the new values into an array. + * When `true`, the value from the expanded field will overwrite the existing value. */ + override?: boolean + /** The field that contains the field to expand. + * Only required if the field to expand is part of another object field, because the `field` option can only understand leaf fields. */ + path?: string +} + +export interface IngestDropProcessor extends IngestProcessorBase { +} + +export interface IngestEnrichProcessor extends IngestProcessorBase { + /** The field in the input document that matches the policy's `match_field` used to retrieve the enrichment data. + * Supports template snippets. */ + field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The maximum number of matched documents to include under the configured target field. + * The `target_field` will be turned into a JSON array if `max_matches` is higher than 1, otherwise `target_field` will become a JSON object. + * In order to avoid documents getting too large, the maximum allowed value is 128. */ + max_matches?: integer + /** Whether the processor will update fields that already contain a non-null value. + * When set to `false`, such fields will not be touched. */ + override?: boolean + /** The name of the enrich policy to use. */ + policy_name: string + /** A spatial relation operator used to match the geoshape of incoming documents to documents in the enrich index. + * This option is only used for `geo_match` enrich policy types.
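+ * @example Sketch of an enrich processor for a `geo_match` policy ('postal_policy' is a
+ * hypothetical policy name):
+ * { enrich: { policy_name: 'postal_policy', field: 'geo_location', target_field: 'geo_data', shape_relation: 'intersects' } }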
*/ + shape_relation?: GeoShapeRelation + /** Field added to incoming documents to contain enrich data. This field contains both the `match_field` and `enrich_fields` specified in the enrich policy. + * Supports template snippets. */ + target_field: Field +} + +export interface IngestFailProcessor extends IngestProcessorBase { + /** The error message thrown by the processor. + * Supports template snippets. */ + message: string +} + +export type IngestFieldAccessPattern = 'classic' | 'flexible' + +export type IngestFingerprintDigest = 'MD5' | 'SHA-1' | 'SHA-256' | 'SHA-512' | 'MurmurHash3' + +export interface IngestFingerprintProcessor extends IngestProcessorBase { + /** Array of fields to include in the fingerprint. For objects, the processor + * hashes both the field key and value. For other fields, the processor hashes + * only the field value. */ + fields: Fields + /** Output field for the fingerprint. */ + target_field?: Field + /** Salt value for the hash function. */ + salt?: string + /** The hash method used to compute the fingerprint. Must be one of MD5, SHA-1, + * SHA-256, SHA-512, or MurmurHash3. */ + method?: IngestFingerprintDigest + /** If true, the processor ignores any missing fields. If all fields are + * missing, the processor silently exits without modifying the document. */ + ignore_missing?: boolean +} + +export interface IngestForeachProcessor extends IngestProcessorBase { + /** Field containing array or object values. */ + field: Field + /** If `true`, the processor silently exits without changing the document if the `field` is `null` or missing. */ + ignore_missing?: boolean + /** Ingest processor to run on each element. */ + processor: IngestProcessorContainer +} + +export interface IngestGeoGridProcessor extends IngestProcessorBase { + /** The field to interpret as a geo-tile. + * The field format is determined by the `tile_type`. */ + field: string + /** Three tile formats are understood: geohash, geotile and geohex. */ + tile_type: IngestGeoGridTileType + /** The field to assign the polygon shape to. By default, the `field` is updated in-place. */ + target_field?: Field + /** If specified and a parent tile exists, save that tile address to this field. */ + parent_field?: Field + /** If specified and children tiles exist, save those tile addresses to this field as an array of strings. */ + children_field?: Field + /** If specified and intersecting non-child tiles exist, save their addresses to this field as an array of strings. */ + non_children_field?: Field + /** If specified, save the tile precision (zoom) as an integer to this field. */ + precision_field?: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** Which format to save the generated polygon in. */ + target_format?: IngestGeoGridTargetFormat +} + +export type IngestGeoGridTargetFormat = 'geojson' | 'wkt' + +export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash' + +export interface IngestGeoIpProcessor extends IngestProcessorBase { + /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */ + database_file?: string + /** The field to get the IP address from for the geographical lookup. */ + field: Field + /** If `true`, only the first found geoip data will be returned, even if the field contains an array.
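+ * @example Sketch of a geoip processor (field names are placeholders):
+ * { geoip: { field: 'source.ip', target_field: 'source.geo', first_only: true } }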
*/ + first_only?: boolean + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** Controls what properties are added to the `target_field` based on the geoip lookup. */ + properties?: string[] + /** The field that will hold the geographical information looked up from the MaxMind database. */ + target_field?: Field + /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. + * Otherwise, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */ + download_database_on_pipeline_creation?: boolean +} + +export interface IngestGrokProcessor extends IngestProcessorBase { + /** Must be `disabled` or `v1`. If `v1`, the processor uses patterns with Elastic + * Common Schema (ECS) field names. */ + ecs_compatibility?: string + /** The field to use for grok expression parsing. */ + field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. + * Patterns matching existing names will override the pre-existing definition. */ + pattern_definitions?: Record<string, string> + /** An ordered list of grok expressions to match and extract named captures with. + * Returns on the first expression in the list that matches. */ + patterns: GrokPattern[] + /** When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched. */ + trace_match?: boolean +} + +export interface IngestGsubProcessor extends IngestProcessorBase { + /** The field to apply the replacement to. */ + field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The pattern to be replaced. */ + pattern: string + /** The string to replace the matching patterns with. */ + replacement: string + /** The field to assign the converted value to. + * By default, the `field` is updated in-place. */ + target_field?: Field +} + +export interface IngestHtmlStripProcessor extends IngestProcessorBase { + /** The string-valued field to remove HTML tags from. */ + field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the `field` is updated in-place. */ + target_field?: Field +} + +export interface IngestInferenceConfig { + /** Regression configuration for inference. */ + regression?: IngestInferenceConfigRegression + /** Classification configuration for inference. */ + classification?: IngestInferenceConfigClassification +} + +export interface IngestInferenceConfigClassification { + /** Specifies the number of top class predictions to return. */ + num_top_classes?: integer + /** Specifies the maximum number of feature importance values per document. */ + num_top_feature_importance_values?: integer + /** The field that is added to incoming documents to contain the inference prediction. */ + results_field?: Field + /** Specifies the field to which the top classes are written. */ + top_classes_results_field?: Field + /** Specifies the type of the predicted field to write.
+ * Valid values are: `string`, `number`, `boolean`. */ + prediction_field_type?: string +} + +export interface IngestInferenceConfigRegression { + /** The field that is added to incoming documents to contain the inference prediction. */ + results_field?: Field + /** Specifies the maximum number of feature importance values per document. */ + num_top_feature_importance_values?: integer +} + +export interface IngestInferenceProcessor extends IngestProcessorBase { + /** The ID or alias for the trained model, or the ID of the deployment. */ + model_id: Id + /** Field added to incoming documents to contain results objects. */ + target_field?: Field + /** Maps the document field names to the known field names of the model. + * This mapping takes precedence over any default mappings provided in the model configuration. */ + field_map?: Record<string, any> + /** Contains the inference type and its options. */ + inference_config?: IngestInferenceConfig + /** Input fields for inference and output (destination) fields for the inference results. + * This option is incompatible with the `target_field` and `field_map` options. */ + input_output?: IngestInputConfig | IngestInputConfig[] + /** If `true` and any of the input fields defined in `input_output` are missing, + * then those missing fields are quietly ignored; otherwise, a missing field causes a failure. + * Only applies when using `input_output` configurations to explicitly list the input fields. */ + ignore_missing?: boolean +} + +export interface IngestIngest { + _redact?: IngestRedact + timestamp: DateTime + pipeline?: Name +} + +export interface IngestInputConfig { + input_field: string + output_field: string +} + +export interface IngestIpLocationProcessor extends IngestProcessorBase { + /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */ + database_file?: string + /** The field to get the IP address from for the geographical lookup. */ + field: Field + /** If `true`, only the first found IP location data will be returned, even if the field contains an array. */ + first_only?: boolean + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** Controls what properties are added to the `target_field` based on the IP location lookup. */ + properties?: string[] + /** The field that will hold the geographical information looked up from the MaxMind database. */ + target_field?: Field + /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. + * Otherwise, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */ + download_database_on_pipeline_creation?: boolean +} + +export interface IngestIpinfo { +} + +export interface IngestJoinProcessor extends IngestProcessorBase { + /** Field containing array values to join. */ + field: Field + /** The separator character. */ + separator: string + /** The field to assign the joined value to. + * By default, the field is updated in-place. */ + target_field?: Field +} + +export interface IngestJsonProcessor extends IngestProcessorBase { + /** Flag that forces the parsed JSON to be added at the top level of the document. + * `target_field` must not be set when this option is chosen.
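+ * @example Sketch: parse a JSON string field and merge the result into the document root
+ * (`message_json` is a placeholder field name):
+ * { json: { field: 'message_json', add_to_root: true } }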
*/ + add_to_root?: boolean + /** When set to `replace`, root fields that conflict with fields from the parsed JSON will be overridden. + * When set to `merge`, conflicting fields will be merged. + * Only applicable if `add_to_root` is set to `true`. */ + add_to_root_conflict_strategy?: IngestJsonProcessorConflictStrategy + /** When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys. + * Instead, the last encountered value for any duplicate key wins. */ + allow_duplicate_keys?: boolean + /** The field to be parsed. */ + field: Field + /** The field that the converted structured object will be written into. + * Any existing content in this field will be overwritten. */ + target_field?: Field +} + +export type IngestJsonProcessorConflictStrategy = 'replace' | 'merge' + +export interface IngestKeyValueProcessor extends IngestProcessorBase { + /** List of keys to exclude from the document. */ + exclude_keys?: string[] + /** The field to be parsed. + * Supports template snippets. */ + field: Field + /** Regex pattern to use for splitting key-value pairs. */ + field_split: string + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** List of keys to filter and insert into the document. + * Defaults to including all keys. */ + include_keys?: string[] + /** Prefix to be added to extracted keys. */ + prefix?: string + /** If `true`, strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values. */ + strip_brackets?: boolean + /** The field to insert the extracted keys into. + * Defaults to the root of the document. + * Supports template snippets. */ + target_field?: Field + /** String of characters to trim from extracted keys. */ + trim_key?: string + /** String of characters to trim from extracted values. */ + trim_value?: string + /** Regex pattern to use for splitting the key from the value within a key-value pair. */ + value_split: string +} + +export interface IngestLocal { + type: string +} + +export interface IngestLowercaseProcessor extends IngestProcessorBase { + /** The field to make lowercase. */ + field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ + target_field?: Field +} + +export interface IngestMaxmind { + account_id: Id +} + +export interface IngestNetworkDirectionProcessor extends IngestProcessorBase { + /** Field containing the source IP address. */ + source_ip?: Field + /** Field containing the destination IP address. */ + destination_ip?: Field + /** Output field for the network direction. */ + target_field?: Field + /** List of internal networks. Supports IPv4 and IPv6 addresses and ranges in + * CIDR notation. Also supports the named ranges listed below. These may be + * constructed with template snippets. Must specify only one of + * internal_networks or internal_networks_field. */ + internal_networks?: string[] + /** A field on the given document to read the internal_networks configuration + * from. */ + internal_networks_field?: Field + /** If true and any required fields are missing, the processor quietly exits + * without modifying the document. */ + ignore_missing?: boolean +} + +export interface IngestPipeline { + /** Description of the ingest pipeline.
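+ * @example Sketch of creating a pipeline with the JS client, assuming an instantiated
+ * `client` ('my-pipeline' is a placeholder id):
+ * await client.ingest.putPipeline({
+ *   id: 'my-pipeline',
+ *   description: 'Normalizes email addresses',
+ *   processors: [{ lowercase: { field: 'email' } }]
+ * })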
+ +export interface IngestPipeline { + /** Description of the ingest pipeline. */ + description?: string + /** Processors to run immediately after a processor failure. */ + on_failure?: IngestProcessorContainer[] + /** Processors used to perform transformations on documents before indexing. + * Processors run sequentially in the order specified. */ + processors?: IngestProcessorContainer[] + /** Version number used by external systems to track ingest pipelines. */ + version?: VersionNumber + /** Marks this ingest pipeline as deprecated. + * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ + deprecated?: boolean + /** Arbitrary metadata about the ingest pipeline. This map is not automatically generated by Elasticsearch. */ + _meta?: Metadata + /** Date and time when the pipeline was created. Only returned if the `human` query parameter is `true`. */ + created_date?: DateTime + /** Date and time when the pipeline was created, in milliseconds since the epoch. */ + created_date_millis?: EpochTime<UnitMillis> + /** Date and time when the pipeline was last modified. Only returned if the `human` query parameter is `true`. */ + modified_date?: DateTime + /** Date and time when the pipeline was last modified, in milliseconds since the epoch. */ + modified_date_millis?: EpochTime<UnitMillis> + /** Controls how processors in this pipeline should read and write data on a document's source. */ + field_access_pattern?: IngestFieldAccessPattern +} + +export interface IngestPipelineConfig { + /** Description of the ingest pipeline. */ + description?: string + /** Version number used by external systems to track ingest pipelines. */ + version?: VersionNumber + /** Processors used to perform transformations on documents before indexing. + * Processors run sequentially in the order specified. */ + processors: IngestProcessorContainer[] +} + +export interface IngestPipelineProcessor extends IngestProcessorBase { + /** The name of the pipeline to execute. + * Supports template snippets. */ + name: Name + /** Whether to ignore missing pipelines instead of failing. */ + ignore_missing_pipeline?: boolean +} + +export interface IngestPipelineProcessorResult { + doc?: IngestDocumentSimulation + tag?: string + processor_type?: string + status?: IngestPipelineSimulationStatusOptions + description?: string + ignored_error?: ErrorCause + error?: ErrorCause +} + +export type IngestPipelineSimulationStatusOptions = 'success' | 'error' | 'error_ignored' | 'skipped' | 'dropped' + +export interface IngestProcessorBase { + /** Description of the processor. + * Useful for describing the purpose of the processor or its configuration. */ + description?: string + /** Conditionally execute the processor. */ + if?: Script | ScriptSource + /** Ignore failures for the processor. */ + ignore_failure?: boolean + /** Handle failures for the processor. */ + on_failure?: IngestProcessorContainer[] + /** Identifier for the processor. + * Useful for debugging and metrics. */ + tag?: string +}
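Every concrete processor inherits the `IngestProcessorBase` fields, so a conditional `if` script, a `tag`, and a per-processor `on_failure` chain can be attached uniformly. A minimal sketch of an `IngestPipeline` value exercising them (the field names and condition are made up; the import path assumes the v8 client layout):

```ts
import type { IngestPipeline } from '@elastic/elasticsearch/lib/api/types'

const pipeline: IngestPipeline = {
  description: 'Tag documents originating in the EU',
  version: 1,
  processors: [
    {
      set: {
        field: 'region',
        value: 'eu',
        tag: 'set-region',            // shows up in simulate/debug output
        if: "ctx.country == 'DE'",    // inline condition (ScriptSource)
        on_failure: [                 // per-processor failure handler
          { set: { field: 'error.note', value: 'set-region failed' } }
        ]
      }
    }
  ]
}
```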
+ +export interface IngestProcessorContainer { + /** Appends one or more values to an existing array if the field already exists and it is an array. + * Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. + * Creates an array containing the provided values if the field doesn’t exist. + * Accepts a single value or an array of values. */ + append?: IngestAppendProcessor + /** The attachment processor lets Elasticsearch extract file attachments in common formats (such as PPT, XLS, and PDF) by using the Apache text extraction library Tika. */ + attachment?: IngestAttachmentProcessor + /** Converts a human readable byte value (for example `1kb`) to its value in bytes (for example `1024`). + * If the field is an array of strings, all members of the array will be converted. + * Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case insensitive. + * An error will occur if the field is not a supported format or the resultant value exceeds 2^63. */ + bytes?: IngestBytesProcessor + /** Converts circle definitions of shapes to regular polygons which approximate them. */ + circle?: IngestCircleProcessor + /** Computes the Community ID for network flow data as defined in the + * Community ID Specification. You can use a community ID to correlate network + * events related to a single flow. */ + community_id?: IngestCommunityIDProcessor + /** Converts a field in the currently ingested document to a different type, such as converting a string to an integer. + * If the field value is an array, all members will be converted. */ + convert?: IngestConvertProcessor + /** Extracts fields from a CSV line out of a single text field within a document. + * Any empty field in CSV will be skipped. */ + csv?: IngestCsvProcessor + /** Parses dates from fields, and then uses the date or timestamp as the timestamp for the document. */ + date?: IngestDateProcessor + /** The purpose of this processor is to point documents to the right time-based index based on a date or timestamp field in a document by using the date math index name support. */ + date_index_name?: IngestDateIndexNameProcessor + /** Extracts structured fields out of a single text field by matching the text field against a delimiter-based pattern. */ + dissect?: IngestDissectProcessor + /** Expands a field with dots into an object field. + * This processor allows fields with dots in the name to be accessible by other processors in the pipeline. + * Otherwise these fields can’t be accessed by any processor. */ + dot_expander?: IngestDotExpanderProcessor + /** Drops the document without raising any errors. + * This is useful to prevent the document from getting indexed based on some condition. */ + drop?: IngestDropProcessor + /** The `enrich` processor can enrich documents with data from another index. */ + enrich?: IngestEnrichProcessor + /** Raises an exception. + * This is useful for when you expect a pipeline to fail and want to relay a specific message to the requester. */ + fail?: IngestFailProcessor + /** Computes a hash of the document’s content. You can use this hash for + * content fingerprinting. */ + fingerprint?: IngestFingerprintProcessor + /** Runs an ingest processor on each element of an array or object. */ + foreach?: IngestForeachProcessor + /** Currently an undocumented alias for the GeoIP processor. */ + ip_location?: IngestIpLocationProcessor + /** Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. + * This is useful if there is a need to interact with the tile shapes as spatially indexable fields. */ + geo_grid?: IngestGeoGridProcessor + /** The `geoip` processor adds information about the geographical location of an IPv4 or IPv6 address. */ + geoip?: IngestGeoIpProcessor
+ /** Extracts structured fields out of a single text field within a document. + * You choose which field to extract matched fields from, as well as the grok pattern you expect will match. + * A grok pattern is like a regular expression that supports aliased expressions that can be reused. */ + grok?: IngestGrokProcessor + /** Converts a string field by applying a regular expression and a replacement. + * If the field is an array of strings, all members of the array will be converted. + * If any non-string values are encountered, the processor will throw an exception. */ + gsub?: IngestGsubProcessor + /** Removes HTML tags from the field. + * If the field is an array of strings, HTML tags will be removed from all members of the array. */ + html_strip?: IngestHtmlStripProcessor + /** Uses a pre-trained data frame analytics model or a model deployed for natural language processing tasks to infer against the data that is being ingested in the pipeline. */ + inference?: IngestInferenceProcessor + /** Joins each element of an array into a single string using a separator character between each element. + * Throws an error when the field is not an array. */ + join?: IngestJoinProcessor + /** Converts a JSON string into a structured JSON object. */ + json?: IngestJsonProcessor + /** This processor helps automatically parse messages (or specific event fields) which are of the `foo=bar` variety. */ + kv?: IngestKeyValueProcessor + /** Converts a string to its lowercase equivalent. + * If the field is an array of strings, all members of the array will be converted. */ + lowercase?: IngestLowercaseProcessor + /** Calculates the network direction given a source IP address, destination IP + * address, and a list of internal networks. */ + network_direction?: IngestNetworkDirectionProcessor + /** Executes another pipeline. */ + pipeline?: IngestPipelineProcessor + /** The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. + * The processor can be used to obscure Personal Identifying Information (PII) by configuring it to detect known patterns such as email or IP addresses. + * Text that matches a Grok pattern is replaced with a configurable string such as `<EMAIL>` where an email address is matched or simply replace all matches with the text `<REDACTED>` if preferred. */ + redact?: IngestRedactProcessor + /** Extracts the registered domain (also known as the effective top-level + * domain or eTLD), sub-domain, and top-level domain from a fully qualified + * domain name (FQDN). Uses the registered domains defined in the Mozilla + * Public Suffix List. */ + registered_domain?: IngestRegisteredDomainProcessor + /** Removes existing fields. + * If one field doesn’t exist, an exception will be thrown. */ + remove?: IngestRemoveProcessor + /** Renames an existing field. + * If the field doesn’t exist or the new name is already used, an exception will be thrown. */ + rename?: IngestRenameProcessor + /** Routes a document to another target index or data stream. + * When setting the `destination` option, the target is explicitly specified and the dataset and namespace options can’t be set. + * When the `destination` option is not set, this processor is in a data stream mode. Note that in this mode, the reroute processor can only be used on data streams that follow the data stream naming scheme. */ + reroute?: IngestRerouteProcessor + /** Runs an inline or stored script on incoming documents. + * The script runs in the `ingest` context. */ + script?: IngestScriptProcessor
+ /** Adds a field with the specified value. + * If the field already exists, its value will be replaced with the provided one. */ + set?: IngestSetProcessor + /** Sets user-related details (such as `username`, `roles`, `email`, `full_name`, `metadata`, `api_key`, `realm` and `authentication_type`) from the current authenticated user to the current document by pre-processing the ingest. */ + set_security_user?: IngestSetSecurityUserProcessor + /** Sorts the elements of an array ascending or descending. + * Homogeneous arrays of numbers will be sorted numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically. + * Throws an error when the field is not an array. */ + sort?: IngestSortProcessor + /** Splits a field into an array using a separator character. + * Only works on string fields. */ + split?: IngestSplitProcessor + /** Terminates the current ingest pipeline, causing no further processors to be run. + * This will normally be executed conditionally, using the `if` option. */ + terminate?: IngestTerminateProcessor + /** Trims whitespace from a field. + * If the field is an array of strings, all members of the array will be trimmed. + * This only works on leading and trailing whitespace. */ + trim?: IngestTrimProcessor + /** Converts a string to its uppercase equivalent. + * If the field is an array of strings, all members of the array will be converted. */ + uppercase?: IngestUppercaseProcessor + /** URL-decodes a string. + * If the field is an array of strings, all members of the array will be decoded. */ + urldecode?: IngestUrlDecodeProcessor + /** Parses a Uniform Resource Identifier (URI) string and extracts its components as an object. + * This URI object includes properties for the URI’s domain, path, fragment, port, query, scheme, user info, username, and password. */ + uri_parts?: IngestUriPartsProcessor + /** The `user_agent` processor extracts details from the user agent string a browser sends with its web requests. + * This processor adds this information by default under the `user_agent` field. */ + user_agent?: IngestUserAgentProcessor +} + +export interface IngestRedact { + /** Indicates whether the document has been redacted. */ + _is_redacted: boolean +} + +export interface IngestRedactProcessor extends IngestProcessorBase { + /** The field to be redacted. */ + field: Field + /** A list of grok expressions to match and redact named captures with. */ + patterns: GrokPattern[] + pattern_definitions?: Record<string, string> + /** Start a redacted section with this token. */ + prefix?: string + /** End a redacted section with this token. */ + suffix?: string + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** If `true` and the current license does not support running redact processors, then the processor quietly exits without modifying the document. */ + skip_if_unlicensed?: boolean + /** If `true` then ingest metadata `_ingest._redact._is_redacted` is set to `true` if the document has been redacted. */ + trace_redact?: boolean +}
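To make the pattern/prefix/suffix interplay concrete, here is a minimal sketch of an `IngestRedactProcessor` value; the Grok pattern names are standard built-ins, everything else is illustrative:

```ts
import type { IngestRedactProcessor } from '@elastic/elasticsearch/lib/api/types'

// Replaces matches with the capture name wrapped in prefix/suffix,
// e.g. "jo@example.com" in the message field becomes "<EMAIL>".
const redact: IngestRedactProcessor = {
  field: 'message',
  patterns: ['%{EMAILADDRESS:EMAIL}', '%{IP:CLIENT_IP}'],
  prefix: '<',
  suffix: '>',
  // On clusters without the required license, pass documents through
  // instead of failing the pipeline.
  skip_if_unlicensed: true
}
```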
+ +export interface IngestRegisteredDomainProcessor extends IngestProcessorBase { + /** Field containing the source FQDN. */ + field: Field + /** Object field containing extracted domain components. If an empty string, + * the processor adds components to the document’s root. */ + target_field?: Field + /** If true and any required fields are missing, the processor quietly exits + * without modifying the document. */ + ignore_missing?: boolean +} + +export interface IngestRemoveProcessor extends IngestProcessorBase { + /** Fields to be removed. Supports template snippets. */ + field: Fields + /** Fields to be kept. When set, all fields other than those specified are removed. */ + keep?: Fields + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean +} + +export interface IngestRenameProcessor extends IngestProcessorBase { + /** The field to be renamed. + * Supports template snippets. */ + field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The new name of the field. + * Supports template snippets. */ + target_field: Field +} + +export interface IngestRerouteProcessor extends IngestProcessorBase { + /** A static value for the target. Can’t be set when the dataset or namespace option is set. */ + destination?: string + /** Field references or a static value for the dataset part of the data stream name. + * In addition to the criteria for index names, cannot contain `-` and must be no longer than 100 characters. + * Example values are nginx.access and nginx.error. + * + * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). + * When resolving field references, the processor replaces invalid characters with _. Uses the `<dataset>` part + * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. + * + * default {{data_stream.dataset}} */ + dataset?: string | string[] + /** Field references or a static value for the namespace part of the data stream name. See the criteria for + * index names for allowed characters. Must be no longer than 100 characters. + * + * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). + * When resolving field references, the processor replaces invalid characters with _. Uses the `<namespace>` part + * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. + * + * default {{data_stream.namespace}} */ + namespace?: string | string[] +} + +export interface IngestScriptProcessor extends IngestProcessorBase { + /** ID of a stored script. + * If no `source` is specified, this parameter is required. */ + id?: Id + /** Script language. */ + lang?: ScriptLanguage + /** Object containing parameters for the script. */ + params?: Record<string, any> + /** Inline script. + * If no `id` is specified, this parameter is required. */ + source?: ScriptSource +}
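A minimal sketch of an inline `IngestScriptProcessor`; keeping the mutable parts in `params` lets the script body stay constant so it compiles once. The field names are hypothetical:

```ts
import type { IngestScriptProcessor } from '@elastic/elasticsearch/lib/api/types'

// Painless runs in the `ingest` context, so the document is available as ctx.
const script: IngestScriptProcessor = {
  lang: 'painless',
  source: 'ctx.weight_kg = ctx.quantity * params.unit_weight_kg',
  params: { unit_weight_kg: 2.5 }
}
```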
+ +export interface IngestSetProcessor extends IngestProcessorBase { + /** The origin field which will be copied to `field`; cannot be set at the same time as `value`. + * Supported data types are `boolean`, `number`, `array`, `object`, `string`, `date`, etc. */ + copy_from?: Field + /** The field to insert, upsert, or update. + * Supports template snippets. */ + field: Field + /** If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document. */ + ignore_empty_value?: boolean + /** The media type for encoding `value`. + * Applies only when value is a template snippet. + * Must be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`. */ + media_type?: string + /** If `true`, the processor updates fields that have a pre-existing non-null value. + * When set to `false`, such fields will not be touched. */ + override?: boolean + /** The value to be set for the field. + * Supports template snippets. + * May specify only one of `value` or `copy_from`. */ + value?: any +} + +export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { + /** The field to store the user information into. */ + field: Field + /** Controls what user related properties are added to the field. */ + properties?: string[] +} + +export type IngestShapeType = 'geo_shape' | 'shape' + +export interface IngestSimulateDocumentResult { + doc?: IngestDocumentSimulation + error?: ErrorCause + processor_results?: IngestPipelineProcessorResult[] +} + +export interface IngestSortProcessor extends IngestProcessorBase { + /** The field to be sorted. */ + field: Field + /** The sort order to use. + * Accepts `"asc"` or `"desc"`. */ + order?: SortOrder + /** The field to assign the sorted value to. + * By default, the field is updated in-place. */ + target_field?: Field +} + +export interface IngestSplitProcessor extends IngestProcessorBase { + /** The field to split. */ + field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** Preserves empty trailing fields, if any. */ + preserve_trailing?: boolean + /** A regex which matches the separator, for example, `,` or `\s+`. */ + separator: string + /** The field to assign the split value to. + * By default, the field is updated in-place. */ + target_field?: Field +} + +export interface IngestTerminateProcessor extends IngestProcessorBase { +} + +export interface IngestTrimProcessor extends IngestProcessorBase { + /** The string-valued field to trim whitespace from. */ + field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The field to assign the trimmed value to. + * By default, the field is updated in-place. */ + target_field?: Field +} + +export interface IngestUppercaseProcessor extends IngestProcessorBase { + /** The field to make uppercase. */ + field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ + target_field?: Field +} + +export interface IngestUriPartsProcessor extends IngestProcessorBase { + /** Field containing the URI string. */ + field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** If `true`, the processor copies the unparsed URI to `<target_field>.original`. */ + keep_original?: boolean + /** If `true`, the processor removes the `field` after parsing the URI string. + * If parsing fails, the processor does not remove the `field`. */ + remove_if_successful?: boolean + /** Output field for the URI object. */ + target_field?: Field +} + +export interface IngestUrlDecodeProcessor extends IngestProcessorBase { + /** The field to decode. */ + field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place.
*/ + target_field?: Field +} + +export interface IngestUserAgentProcessor extends IngestProcessorBase { + /** The field containing the user agent string. */ + field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ + ignore_missing?: boolean + /** The name of the file in the `config/ingest-user-agent` directory containing the regular expressions for parsing the user agent string. Both the directory and the file have to be created before starting Elasticsearch. If not specified, ingest-user-agent will use the `regexes.yaml` from uap-core it ships with. */ + regex_file?: string + /** The field that will be filled with the user agent details. */ + target_field?: Field + /** Controls what properties are added to `target_field`. */ + properties?: IngestUserAgentProperty[] + /** Extracts device type from the user agent string on a best-effort basis. + * @beta */ + extract_device_type?: boolean +} + +export type IngestUserAgentProperty = 'name' | 'os' | 'device' | 'original' | 'version' + +export interface IngestWeb { +} + +export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { + /** A comma-separated list of geoip database configurations to delete */ + id: Ids + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } +} + +export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase + +export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { + /** A comma-separated list of IP location database configurations. */ + id: Ids + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } +} + +export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase + +export interface IngestDeletePipelineRequest extends RequestBase { + /** Pipeline ID or wildcard expression of pipeline IDs used to limit the request. + * To delete all ingest pipelines in a cluster, use a value of `*`. */ + id: Id + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. 
*/ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } +} + +export type IngestDeletePipelineResponse = AcknowledgedResponseBase + +export interface IngestGeoIpStatsGeoIpDownloadStatistics { + /** Total number of successful database downloads. */ + successful_downloads: integer + /** Total number of failed database downloads. */ + failed_downloads: integer + /** Total milliseconds spent downloading databases. */ + total_download_time: DurationValue<UnitMillis> + /** Current number of databases available for use. */ + databases_count: integer + /** Total number of database updates skipped. */ + skipped_updates: integer + /** Total number of databases not updated after 30 days. */ + expired_databases: integer +} + +export interface IngestGeoIpStatsGeoIpNodeDatabaseName { + /** Name of the database. */ + name: Name +} + +export interface IngestGeoIpStatsGeoIpNodeDatabases { + /** Downloaded databases for the node. */ + databases: IngestGeoIpStatsGeoIpNodeDatabaseName[] + /** Downloaded database files, including related license files. Elasticsearch stores these files in the node’s temporary directory: $ES_TMPDIR/geoip-databases/<node_id>. */ + files_in_temp: string[] +} + +export interface IngestGeoIpStatsRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface IngestGeoIpStatsResponse { + /** Download statistics for all GeoIP2 databases. */ + stats: IngestGeoIpStatsGeoIpDownloadStatistics + /** Downloaded GeoIP2 databases for each node. */ + nodes: Record<Id, IngestGeoIpStatsGeoIpNodeDatabases> +} + +export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata { + id: Id + version: long + modified_date_millis: EpochTime<UnitMillis> + database: IngestDatabaseConfiguration +} + +export interface IngestGetGeoipDatabaseRequest extends RequestBase { + /** A comma-separated list of database configuration IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all database configurations, omit this parameter or use `*`. */ + id?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface IngestGetGeoipDatabaseResponse { + databases: IngestGetGeoipDatabaseDatabaseConfigurationMetadata[] +}
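A short usage sketch for the stats types above, assuming a v8 client instance; the connection settings are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// stats and nodes follow IngestGeoIpStatsResponse; nodes is keyed by node ID.
async function reportGeoIpHealth(): Promise<void> {
  const { stats, nodes } = await client.ingest.geoIpStats()
  console.log(`downloads: ${stats.successful_downloads} ok, ${stats.failed_downloads} failed`)
  for (const [nodeId, node] of Object.entries(nodes)) {
    console.log(nodeId, node.databases.map(d => d.name).join(', '))
  }
}
```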
+ +export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata { + id: Id + version: VersionNumber + modified_date_millis?: EpochTime<UnitMillis> + modified_date?: EpochTime<UnitMillis> + database: IngestDatabaseConfigurationFull +} + +export interface IngestGetIpLocationDatabaseRequest extends RequestBase { + /** Comma-separated list of database configuration IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all database configurations, omit this parameter or use `*`. */ + id?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface IngestGetIpLocationDatabaseResponse { + databases: IngestGetIpLocationDatabaseDatabaseConfigurationMetadata[] +} + +export interface IngestGetPipelineRequest extends RequestBase { + /** Comma-separated list of pipeline IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all ingest pipelines, omit this parameter or use `*`. */ + id?: Id + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Return pipelines without their definitions (default: `false`). */ + summary?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, summary?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, summary?: never } +} + +export type IngestGetPipelineResponse = Record<string, IngestPipeline> + +export interface IngestProcessorGrokRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface IngestProcessorGrokResponse { + patterns: Record<string, string> +} + +export interface IngestPutGeoipDatabaseRequest extends RequestBase { + /** ID of the database configuration to create or update. */ + id: Id + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The provider-assigned name of the IP geolocation database to download. */ + name: Name + /** The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. + * At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. */ + maxmind: IngestMaxmind + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, name?: never, maxmind?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, name?: never, maxmind?: never } +} + +export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase + +export interface IngestPutIpLocationDatabaseRequest extends RequestBase { + /** The database configuration identifier. */ + id: Id + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ + master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+ * If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. + * A value of `-1` indicates that the request should never time out. */ + timeout?: Duration + configuration?: IngestDatabaseConfiguration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, configuration?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, configuration?: never } +} + +export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase + +export interface IngestPutPipelineRequest extends RequestBase { + /** ID of the ingest pipeline to create or update. */ + id: Id + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** Required version for optimistic concurrency control for pipeline updates */ + if_version?: VersionNumber + /** Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. */ + _meta?: Metadata + /** Description of the ingest pipeline. */ + description?: string + /** Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. */ + on_failure?: IngestProcessorContainer[] + /** Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. */ + processors?: IngestProcessorContainer[] + /** Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. */ + version?: VersionNumber + /** Marks this ingest pipeline as deprecated. + * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ + deprecated?: boolean + /** Controls how processors in this pipeline should read and write data on a document's source. */ + field_access_pattern?: IngestFieldAccessPattern + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never, field_access_pattern?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never, field_access_pattern?: never } +} + +export type IngestPutPipelineResponse = AcknowledgedResponseBase + +export interface IngestSimulateRequest extends RequestBase { + /** The pipeline to test. + * If you don't specify a `pipeline` in the request body, this parameter is required. */ + id?: Id + /** If `true`, the response includes output data for each processor in the executed pipeline. */ + verbose?: boolean + /** Sample documents to test in the pipeline. */ + docs: IngestDocument[] + /** The pipeline to test. + * If you don't specify the `pipeline` request path parameter, this parameter is required. + * If you specify both this and the request path parameter, the API only uses the request path parameter. */ + pipeline?: IngestPipeline + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, verbose?: never, docs?: never, pipeline?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, verbose?: never, docs?: never, pipeline?: never } +} + +export interface IngestSimulateResponse { + docs: IngestSimulateDocumentResult[] +}
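A usage sketch tying the simulate request and result types together; the client instance and the sample document are illustrative:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Dry-runs an inline pipeline against one sample document. With verbose: true
// the response carries one IngestPipelineProcessorResult per processor.
async function testPipeline(): Promise<void> {
  const res = await client.ingest.simulate({
    verbose: true,
    docs: [{ _source: { message: 'user=kimchy level=info' } }],
    pipeline: {
      processors: [
        { kv: { field: 'message', field_split: ' ', value_split: '=' } }
      ]
    }
  })
  for (const result of res.docs) {
    console.log(result.processor_results?.map(p => p.status))
  }
}
```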
+ +export interface LicenseLicense { + expiry_date_in_millis: EpochTime<UnitMillis> + issue_date_in_millis: EpochTime<UnitMillis> + start_date_in_millis?: EpochTime<UnitMillis> + issued_to: string + issuer: string + max_nodes?: long | null + max_resource_units?: long + signature: string + type: LicenseLicenseType + uid: string +} + +export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' + +export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' + +export interface LicenseDeleteRequest extends RequestBase { + /** The period to wait for a connection to the master node. */ + master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export type LicenseDeleteResponse = AcknowledgedResponseBase + +export interface LicenseGetLicenseInformation { + expiry_date?: DateTime + expiry_date_in_millis?: EpochTime<UnitMillis> + issue_date: DateTime + issue_date_in_millis: EpochTime<UnitMillis> + issued_to: string + issuer: string + max_nodes: long | null + max_resource_units?: integer | null + status: LicenseLicenseStatus + type: LicenseLicenseType + uid: Uuid + start_date_in_millis: EpochTime<UnitMillis> +} + +export interface LicenseGetRequest extends RequestBase { + /** If `true`, this parameter returns `enterprise` for Enterprise license types. If `false`, this parameter returns `platinum` for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. + * This parameter is deprecated and will always be set to `true` in 8.x. */ + accept_enterprise?: boolean + /** Specifies whether to retrieve local information. + * From 9.2 onwards the default value is `true`, which means the information is retrieved from the responding node. + * In earlier versions the default is `false`, which means the information is retrieved from the elected master node. */ + local?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { accept_enterprise?: never, local?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { accept_enterprise?: never, local?: never } +} + +export interface LicenseGetResponse { + license: LicenseGetLicenseInformation +} + +export interface LicenseGetBasicStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface LicenseGetBasicStatusResponse { + eligible_to_start_basic: boolean +} + +export interface LicenseGetTrialStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface LicenseGetTrialStatusResponse { + eligible_to_start_trial: boolean +} + +export interface LicensePostAcknowledgement { + license: string[] + message: string +} + +export interface LicensePostRequest extends RequestBase { + /** Specifies whether you acknowledge the license changes. */ + acknowledge?: boolean + /** The period to wait for a connection to the master node. */ + master_timeout?: Duration + /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + license?: LicenseLicense + /** A sequence of one or more JSON documents containing the license information. */ + licenses?: LicenseLicense[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never, license?: never, licenses?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never, license?: never, licenses?: never } +} + +export interface LicensePostResponse { + acknowledge?: LicensePostAcknowledgement + acknowledged: boolean + license_status: LicenseLicenseStatus +}
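A brief usage sketch for the license read types; the client instance is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// The response follows LicenseGetResponse/LicenseGetLicenseInformation above;
// accept_enterprise: true reports enterprise licenses as such.
async function printLicense(): Promise<void> {
  const { license } = await client.license.get({ accept_enterprise: true })
  console.log(`${license.type} license (${license.status}), issued to ${license.issued_to}`)
}
```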
+ +export interface LicensePostStartBasicRequest extends RequestBase { + /** Whether the user has acknowledged the license acknowledge messages (default: `false`). */ + acknowledge?: boolean + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acknowledge?: never, master_timeout?: never, timeout?: never } +} + +export interface LicensePostStartBasicResponse { + acknowledged: boolean + basic_was_started: boolean + error_message?: string + type?: LicenseLicenseType + acknowledge?: Record<string, string | string[]> +} + +export interface LicensePostStartTrialRequest extends RequestBase { + /** Whether the user has acknowledged the license acknowledge messages (default: `false`). */ + acknowledge?: boolean + /** The type of trial license to generate (default: "trial"). */ + type?: string + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acknowledge?: never, type?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acknowledge?: never, type?: never, master_timeout?: never } +} + +export interface LicensePostStartTrialResponse { + acknowledged: boolean + error_message?: string + trial_was_started: boolean + type?: LicenseLicenseType +} + +export interface LogstashPipeline { + /** A description of the pipeline. + * This description is not used by Elasticsearch or Logstash. */ + description: string + /** The date the pipeline was last updated. + * It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. */ + last_modified: DateTime + /** The configuration for the pipeline. */ + pipeline: string + /** Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. */ + pipeline_metadata: LogstashPipelineMetadata + /** Settings for the pipeline. + * It supports only flat keys in dot notation. */ + pipeline_settings: LogstashPipelineSettings + /** The user who last updated the pipeline. */ + username: string +} + +export interface LogstashPipelineMetadata { + type: string + version: string +} + +export interface LogstashPipelineSettings { + /** The number of workers that will, in parallel, execute the filter and output stages of the pipeline. */ + 'pipeline.workers': integer + /** The maximum number of events an individual worker thread will collect from inputs before attempting to execute its filters and outputs. */ + 'pipeline.batch.size': integer + /** When creating pipeline event batches, how long in milliseconds to wait for each event before dispatching an undersized batch to pipeline workers. */ + 'pipeline.batch.delay': integer + /** The internal queuing model to use for event buffering. */ + 'queue.type': string + /** The total capacity of the queue (`queue.type: persisted`) in number of bytes. */ + 'queue.max_bytes': string + /** The maximum number of written events before forcing a checkpoint when persistent queues are enabled (`queue.type: persisted`). */ + 'queue.checkpoint.writes': integer +} + +export interface LogstashDeletePipelineRequest extends RequestBase { + /** An identifier for the pipeline. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export type LogstashDeletePipelineResponse = boolean
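Since `pipeline_settings` uses flat dotted keys (quoted property names in TypeScript), a concrete value may help. A minimal sketch of a `LogstashPipeline`; all values are illustrative:

```ts
import type { LogstashPipeline } from '@elastic/elasticsearch/lib/api/types'

const pipeline: LogstashPipeline = {
  description: 'Ingest Apache access logs',
  last_modified: new Date().toISOString(),
  pipeline: 'input { stdin {} } output { elasticsearch {} }',
  pipeline_metadata: { type: 'logstash_pipeline', version: '1' },
  pipeline_settings: {
    'pipeline.workers': 2,
    'pipeline.batch.size': 125,
    'pipeline.batch.delay': 50,
    'queue.type': 'memory',
    'queue.max_bytes': '1gb',
    'queue.checkpoint.writes': 1024
  },
  username: 'elastic'
}
```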
+ +export interface LogstashGetPipelineRequest extends RequestBase { + /** A comma-separated list of pipeline identifiers. */ + id?: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export type LogstashGetPipelineResponse = Record<string, LogstashPipeline> + +export interface LogstashPutPipelineRequest extends RequestBase { + /** An identifier for the pipeline. + * Pipeline IDs must begin with a letter or underscore and contain only letters, underscores, hyphens, and numbers. */ + id: Id + pipeline?: LogstashPipeline + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, pipeline?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, pipeline?: never } +} + +export type LogstashPutPipelineResponse = boolean + +export interface MigrationDeprecationsDeprecation { + /** Optional details about the deprecation warning. */ + details?: string + /** The level property describes the significance of the issue. */ + level: MigrationDeprecationsDeprecationLevel + /** Descriptive information about the deprecation warning. */ + message: string + /** A link to the breaking change documentation, where you can find more information about this change. */ + url: string + resolve_during_rolling_upgrade: boolean + _meta?: Record<string, any> +} + +export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical' + +export interface MigrationDeprecationsRequest extends RequestBase { + /** Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported. */ + index?: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } +}
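A short usage sketch for the deprecations response that follows: the flat array sections are scanned here, while the `Record<...>` sections are keyed per index, template, or policy. The client instance is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

// Surfaces blockers before a major-version upgrade.
async function criticalDeprecations(): Promise<void> {
  const res = await client.migration.deprecations()
  const critical = [...res.cluster_settings, ...res.node_settings, ...res.ml_settings]
    .filter(d => d.level === 'critical')
  for (const d of critical) console.log(`${d.message} -> ${d.url}`)
}
```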
+ +export interface MigrationDeprecationsResponse { + /** Cluster-level deprecation warnings. */ + cluster_settings: MigrationDeprecationsDeprecation[] + /** Index warnings are sectioned off per index and can be filtered using an index-pattern in the query. + * This section includes warnings for the backing indices of data streams specified in the request path. */ + index_settings: Record<string, MigrationDeprecationsDeprecation[]> + data_streams: Record<string, MigrationDeprecationsDeprecation[]> + /** Node-level deprecation warnings. + * Since only a subset of your nodes might incorporate these settings, it is important to read the details section for more information about which nodes are affected. */ + node_settings: MigrationDeprecationsDeprecation[] + /** Machine learning-related deprecation warnings. */ + ml_settings: MigrationDeprecationsDeprecation[] + /** Template warnings are sectioned off per template and include deprecations for both component templates and + * index templates. */ + templates: Record<string, MigrationDeprecationsDeprecation[]> + /** ILM policy warnings are sectioned off per policy. */ + ilm_policies: Record<string, MigrationDeprecationsDeprecation[]> +} + +export interface MigrationGetFeatureUpgradeStatusMigrationFeature { + feature_name: string + minimum_index_version: VersionString + migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus + indices: MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo[] +} + +export interface MigrationGetFeatureUpgradeStatusMigrationFeatureIndexInfo { + index: IndexName + version: VersionString + failure_cause?: ErrorCause +} + +export type MigrationGetFeatureUpgradeStatusMigrationStatus = 'NO_MIGRATION_NEEDED' | 'MIGRATION_NEEDED' | 'IN_PROGRESS' | 'ERROR' + +export interface MigrationGetFeatureUpgradeStatusRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface MigrationGetFeatureUpgradeStatusResponse { + features: MigrationGetFeatureUpgradeStatusMigrationFeature[] + migration_status: MigrationGetFeatureUpgradeStatusMigrationStatus +} + +export interface MigrationPostFeatureUpgradeMigrationFeature { + feature_name: string +} + +export interface MigrationPostFeatureUpgradeRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface MigrationPostFeatureUpgradeResponse { + accepted: boolean + features?: MigrationPostFeatureUpgradeMigrationFeature[] + reason?: string +} + +export interface MlAdaptiveAllocationsSettings { + /** If true, adaptive_allocations is enabled. */ + enabled: boolean + /** Specifies the minimum number of allocations to scale to. + * If set, it must be greater than or equal to 0. + * If not defined, the deployment scales to 0. */ + min_number_of_allocations?: integer + /** Specifies the maximum number of allocations to scale to. + * If set, it must be greater than or equal to min_number_of_allocations. */ + max_number_of_allocations?: integer +} + +export interface MlAnalysisConfig { + /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. This value should be either a whole number of days or equate to a + * whole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation. */ + bucket_span?: Duration + /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. This property cannot be used at the same time as `categorization_filters`. The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. The `categorization_analyzer` field can be specified either as a string or as an object. If it is a string, it must refer to a built-in analyzer or one added by another plugin. */ + categorization_analyzer?: MlCategorizationAnalyzer + /** If this property is specified, the values of the specified field will be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ + categorization_field_name?: Field + /** If `categorization_field_name` is specified, you can also define optional filters.
This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as `categorization_analyzer`. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the `categorization_analyzer` property instead and include the filters as pattern_replace character filters. The effect is exactly the same. */ + categorization_filters?: string[] + /** Detector configuration objects specify which data fields a job analyzes. They also specify which analytical functions are used. You can specify multiple detectors for a job. If the detectors array does not contain at least one detector, no analysis can occur and an error is returned. */ + detectors: MlDetector[] + /** A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration. You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ + influencers?: Field[] + /** The size of the window in which to expect data that is out of time order. If you specify a non-zero value, it must be greater than or equal to one second. NOTE: Latency is applicable only when you send data by using the post data API. */ + latency?: Duration + /** Advanced configuration option. Affects the pruning of models that have not been updated for the given time duration. The value must be set to a multiple of the `bucket_span`. If set too low, important information may be removed from the model. For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ + model_prune_window?: Duration + /** This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features. If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. For example, suppose CPU and memory usage on host A is usually highly correlated with the same metrics on host B. Perhaps this correlation occurs because they are running a load-balanced application. If you enable this property, anomalies will be reported when, for example, CPU usage on host A is high and the value of CPU usage on host B is low. That is to say, you’ll see an anomaly when the CPU of host A is unusual given the CPU of host B. To use the `multivariate_by_fields` property, you must also specify `by_field_name` in your detector. */ + multivariate_by_fields?: boolean + /** Settings related to how categorization interacts with partition fields. */ + per_partition_categorization?: MlPerPartitionCategorization + /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. This property value is the name of the field that contains the count of raw data points that have been summarized. 
The same `summary_count_field_name` applies to all detectors in the job. NOTE: The `summary_count_field_name` property cannot be used with the `metric` function. */ + summary_count_field_name?: Field +} + +export interface MlAnalysisConfigRead { + /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. */ + bucket_span: Duration + /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. + * This property cannot be used at the same time as `categorization_filters`. + * The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. */ + categorization_analyzer?: MlCategorizationAnalyzer + /** If this property is specified, the values of the specified field will be categorized. + * The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ + categorization_field_name?: Field + /** If `categorization_field_name` is specified, you can also define optional filters. + * This property expects an array of regular expressions. + * The expressions are used to filter out matching sequences from the categorization field values. */ + categorization_filters?: string[] + /** An array of detector configuration objects. + * Detector configuration objects specify which data fields a job analyzes. + * They also specify which analytical functions are used. + * You can specify multiple detectors for a job. */ + detectors: MlDetectorRead[] + /** A comma separated list of influencer field names. + * Typically these can be the by, over, or partition fields that are used in the detector configuration. + * You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. + * When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ + influencers: Field[] + /** Advanced configuration option. + * Affects the pruning of models that have not been updated for the given time duration. + * The value must be set to a multiple of the `bucket_span`. + * If set too low, important information may be removed from the model. + * Typically, set to `30d` or longer. + * If not set, model pruning only occurs if the model memory status reaches the soft limit or the hard limit. + * For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ + model_prune_window?: Duration + /** The size of the window in which to expect data that is out of time order. + * Defaults to no latency. + * If you specify a non-zero value, it must be greater than or equal to one second. */ + latency?: Duration + /** This functionality is reserved for internal use. + * It is not supported for use in customer environments and is not subject to the support SLA of official GA features. + * If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. */ + multivariate_by_fields?: boolean + /** Settings related to how categorization interacts with partition fields. */ + per_partition_categorization?: MlPerPartitionCategorization + /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. 
+ * This property value is the name of the field that contains the count of raw data points that have been summarized. + * The same `summary_count_field_name` applies to all detectors in the job. */ + summary_count_field_name?: Field +} + +export interface MlAnalysisLimits { + /** The maximum number of examples stored per category in memory and in the results data store. If you increase this value, more examples are available, however it requires that you have more storage available. If you set this value to 0, no examples are stored. NOTE: The `categorization_examples_limit` applies only to analysis that uses categorization. */ + categorization_examples_limit?: long + /** The approximate maximum amount of memory resources that are required for analytical processing. Once this limit is approached, data pruning becomes more aggressive. Upon exceeding this limit, new entities are not modeled. If the `xpack.ml.max_model_memory_limit` setting has a value greater than 0 and less than 1024mb, that value is used instead of the default. The default value is relatively small to ensure that high resource usage is a conscious decision. If you have jobs that are expected to analyze high cardinality fields, you will likely need to use a higher value. If you specify a number instead of a string, the units are assumed to be MiB. Specifying a string is recommended for clarity. If you specify a byte size unit of `b` or `kb` and the number does not equate to a discrete number of megabytes, it is rounded down to the closest MiB. The minimum valid value is 1 MiB. If you specify a value less than 1 MiB, an error occurs. If you specify a value for the `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create jobs that have `model_memory_limit` values greater than that setting value. */ + model_memory_limit?: ByteSize +} + +export interface MlAnalysisMemoryLimit { + /** Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ + model_memory_limit: string +} + +export interface MlAnomaly { + /** The actual value for the bucket. */ + actual?: double[] + /** Information about the factors impacting the initial anomaly score. */ + anomaly_score_explanation?: MlAnomalyExplanation + /** The length of the bucket in seconds. This value matches the `bucket_span` that is specified in the job. */ + bucket_span: DurationValue + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ + by_field_name?: string + /** The value of `by_field_name`. */ + by_field_value?: string + /** For population analysis, an over field must be specified in the detector. This property contains an array of anomaly records that are the causes for the anomaly that has been identified for the over field. This sub-resource contains the most anomalous records for the `over_field_name`. For scalability reasons, a maximum of the 10 most significant causes of the anomaly are returned. As part of the core analytical modeling, these low-level anomaly records are aggregated for their parent over field record. The `causes` resource contains similar elements to the record resource, namely `actual`, `typical`, `geo_results.actual_point`, `geo_results.typical_point`, `*_field_name` and `*_field_value`. 
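// A hypothetical sketch (not from the original diff) of MlAnalysisLimits, illustrating
// the byte-size rule above: a bare number is read as MiB, so a string unit is clearer.
const limits: MlAnalysisLimits = {
  categorization_examples_limit: 4,
  model_memory_limit: '64mb'   // explicit unit; '64' alone would also mean 64 MiB
}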
Probability and scores are not applicable to causes. */ + causes?: MlAnomalyCause[] + /** A unique identifier for the detector. */ + detector_index: integer + /** Certain functions require a field to operate on, for example, `sum()`. For those functions, this value is the name of the field to be analyzed. */ + field_name?: string + /** The function in which the anomaly occurs, as specified in the detector configuration. For example, `max`. */ + function?: string + /** The description of the function in which the anomaly occurs, as specified in the detector configuration. */ + function_description?: string + /** If the detector function is `lat_long`, this object contains comma delimited strings for the latitude and longitude of the actual and typical values. */ + geo_results?: MlGeoResults + /** If influencers were specified in the detector configuration, this array contains influencers that contributed to or were to blame for an anomaly. */ + influencers?: MlInfluence[] + /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. This is the initial value that was calculated at the time the bucket was processed. */ + initial_record_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ + is_interim: boolean + /** Identifier for the anomaly detection job. */ + job_id: string + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ + over_field_name?: string + /** The value of `over_field_name`. */ + over_field_value?: string + /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ + partition_field_name?: string + /** The value of `partition_field_name`. */ + partition_field_value?: string + /** The probability of the individual anomaly occurring, in the range 0 to 1. For example, `0.0000772031`. This value can be held to a high precision of over 300 decimal places, so the `record_score` is provided as a human-readable and friendly interpretation of this. */ + probability: double + /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. Unlike `initial_record_score`, this value will be updated by a re-normalization process as new data is analyzed. */ + record_score: double + /** Internal. This is always set to `record`. */ + result_type: string + /** The start time of the bucket for which these results were calculated. */ + timestamp: EpochTime + /** The typical value for the bucket, according to analytical modeling. */ + typical?: double[] +} + +export interface MlAnomalyCause { + actual?: double[] + by_field_name?: Name + by_field_value?: string + correlated_by_field_value?: string + field_name?: Field + function?: string + function_description?: string + geo_results?: MlGeoResults + influencers?: MlInfluence[] + over_field_name?: Name + over_field_value?: string + partition_field_name?: string + partition_field_value?: string + probability: double + typical?: double[] +} + +export interface MlAnomalyExplanation { + /** Impact from the duration and magnitude of the detected anomaly relative to the historical average. */ + anomaly_characteristics_impact?: integer + /** Length of the detected anomaly in the number of buckets. 
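// A small illustrative helper (not from the original diff) showing how the scores above
// are typically read: `record_score` is the re-normalized value, while
// `initial_record_score` is what was computed when the bucket was first processed.
function significantAnomalies (records: MlAnomaly[], minScore = 75): MlAnomaly[] {
  return records
    .filter(r => !r.is_interim)               // skip results built from partial buckets
    .filter(r => r.record_score >= minScore)  // keep only high-scoring records
    .sort((a, b) => b.record_score - a.record_score)
}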
*/ + anomaly_length?: integer + /** Type of the detected anomaly: `spike` or `dip`. */ + anomaly_type?: string + /** Indicates reduction of anomaly score for the bucket with large confidence intervals. If a bucket has large confidence intervals, the score is reduced. */ + high_variance_penalty?: boolean + /** If the bucket contains fewer samples than expected, the score is reduced. */ + incomplete_bucket_penalty?: boolean + /** Lower bound of the 95% confidence interval. */ + lower_confidence_bound?: double + /** Impact of the deviation between actual and typical values in the past 12 buckets. */ + multi_bucket_impact?: integer + /** Impact of the deviation between actual and typical values in the current bucket. */ + single_bucket_impact?: integer + /** Typical (expected) value for this bucket. */ + typical_value?: double + /** Upper bound of the 95% confidence interval. */ + upper_confidence_bound?: double +} + +export interface MlApiKeyAuthorization { + /** The identifier for the API key. */ + id: string + /** The name of the API key. */ + name: string +} + +export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' + +export interface MlBucketInfluencer { + /** A normalized score between 0-100, which is calculated for each bucket influencer. This score might be updated as + * newer data is analyzed. */ + anomaly_score: double + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ + bucket_span: DurationValue + /** The field name of the influencer. */ + influencer_field_name: Field + /** The score between 0-100 for each bucket influencer. This score is the initial value that was calculated at the + * time the bucket was processed. */ + initial_anomaly_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ + is_interim: boolean + /** Identifier for the anomaly detection job. */ + job_id: Id + /** The probability that the bucket has this behavior, in the range 0 to 1. This value can be held to a high precision + * of over 300 decimal places, so the `anomaly_score` is provided as a human-readable and friendly interpretation of + * this. */ + probability: double + /** Internal. */ + raw_anomaly_score: double + /** Internal. This value is always set to `bucket_influencer`. */ + result_type: string + /** The start time of the bucket for which these results were calculated. */ + timestamp: EpochTime + /** The start time of the bucket for which these results were calculated. */ + timestamp_string?: DateTime +} + +export interface MlBucketSummary { + /** The maximum anomaly score, between 0-100, for any of the bucket influencers. This is an overall, rate-limited + * score for the job. All the anomaly records in the bucket contribute to this score. This value might be updated as + * new data is analyzed. */ + anomaly_score: double + bucket_influencers: MlBucketInfluencer[] + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ + bucket_span: DurationValue + /** The number of input data records processed in this bucket. */ + event_count: long + /** The maximum anomaly score for any of the bucket influencers. This is the initial value that was calculated at the + * time the bucket was processed. */ + initial_anomaly_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. 
*/ + is_interim: boolean + /** Identifier for the anomaly detection job. */ + job_id: Id + /** The amount of time, in milliseconds, that it took to analyze the bucket contents and calculate results. */ + processing_time_ms: DurationValue + /** Internal. This value is always set to `bucket`. */ + result_type: string + /** The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the + * timestamp of the bucket are included in the results for the bucket. */ + timestamp: EpochTime + /** The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the + * timestamp of the bucket are included in the results for the bucket. */ + timestamp_string?: DateTime +} + +export interface MlCalendarEvent { + /** A string that uniquely identifies a calendar. */ + calendar_id?: Id + event_id?: Id + /** A description of the scheduled event. */ + description: string + /** The timestamp for the end of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ + end_time: DateTime + /** The timestamp for the beginning of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ + start_time: DateTime + /** When true, the model will not create results for this calendar period. */ + skip_result?: boolean + /** When true, the model will not be updated for this calendar period. */ + skip_model_update?: boolean + /** Shift time by this many seconds. For example, adjust time for daylight saving time changes. */ + force_time_shift?: integer +} + +export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition + +export interface MlCategorizationAnalyzerDefinition { + /** One or more character filters. In addition to the built-in character filters, other plugins can provide more character filters. If this property is not specified, no character filters are applied prior to categorization. If you are customizing some other aspect of the analyzer and you need to achieve the equivalent of `categorization_filters` (which are not permitted when some other aspect of the analyzer is customized), add them here as pattern replace character filters. */ + char_filter?: AnalysisCharFilter[] + /** One or more token filters. In addition to the built-in token filters, other plugins can provide more token filters. If this property is not specified, no token filters are applied prior to categorization. */ + filter?: AnalysisTokenFilter[] + /** The name or definition of the tokenizer to use after character filters are applied. This property is compulsory if `categorization_analyzer` is specified as an object. Machine learning provides a tokenizer called `ml_standard` that tokenizes in a way that has been determined to produce good categorization results on a variety of log file formats for logs in English. If you want to use that tokenizer but change the character or token filters, specify "tokenizer": "ml_standard" in your `categorization_analyzer`. Additionally, the `ml_classic` tokenizer is available, which tokenizes in the same way as the non-customizable tokenizer in old versions of the product (before 6.2). `ml_classic` was the default categorization tokenizer in versions 6.2 to 7.13, so if you need categorization identical to the default for jobs created in these versions, specify "tokenizer": "ml_classic" in your `categorization_analyzer`.
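// A hypothetical MlCalendarEvent (not from the original diff) illustrating the fields
// above, e.g. a daylight saving change where clocks go back one hour (-3600 seconds).
// Timestamps and description are invented.
const dstEvent: MlCalendarEvent = {
  description: 'DST ends (clocks back one hour)',
  start_time: '2025-11-02T02:00:00Z',
  end_time: '2025-11-02T03:00:00Z',
  force_time_shift: -3600,   // shift model time instead of skipping results
  skip_result: false,
  skip_model_update: false
}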
*/ + tokenizer?: AnalysisTokenizer +} + +export type MlCategorizationStatus = 'ok' | 'warn' + +export interface MlCategory { + /** A unique identifier for the category. category_id is unique at the job level, even when per-partition categorization is enabled. */ + category_id: ulong + /** A list of examples of actual values that matched the category. */ + examples: string[] + /** [experimental] A Grok pattern that could be used in Logstash or an ingest pipeline to extract fields from messages that match the category. This field is experimental and may be changed or removed in a future release. The Grok patterns that are found are not optimal, but are often a good starting point for manual tweaking. */ + grok_pattern?: GrokPattern + /** Identifier for the anomaly detection job. */ + job_id: Id + /** The maximum length of the fields that matched the category. The value is increased by 10% to enable matching for similar fields that have not been analyzed. */ + max_matching_length: ulong + /** If per-partition categorization is enabled, this property identifies the field used to segment the categorization. It is not present when per-partition categorization is disabled. */ + partition_field_name?: string + /** If per-partition categorization is enabled, this property identifies the value of the partition_field_name for the category. It is not present when per-partition categorization is disabled. */ + partition_field_value?: string + /** A regular expression that is used to search for values that match the category. */ + regex: string + /** A space separated list of the common tokens that are matched in values of the category. */ + terms: string + /** The number of messages that have been matched by this category. This is only guaranteed to have the latest accurate count after a job _flush or _close */ + num_matches?: long + /** A list of category_id entries that this current category encompasses. Any new message that is processed by the categorizer will match against this category and not any of the categories in this list. This is only guaranteed to have the latest accurate list of categories after a job _flush or _close */ + preferred_to_categories?: Id[] + p?: string + result_type: string + mlcategory: string +} + +export interface MlChunkingConfig { + /** If the mode is `auto`, the chunk size is dynamically calculated; + * this is the recommended value when the datafeed does not use aggregations. + * If the mode is `manual`, chunking is applied according to the specified `time_span`; + * use this mode when the datafeed uses aggregations. If the mode is `off`, no chunking is applied. */ + mode: MlChunkingMode + /** The time span that each search will be querying. This setting is applicable only when the `mode` is set to `manual`. */ + time_span?: Duration +} + +export type MlChunkingMode = 'auto' | 'manual' | 'off' + +export interface MlClassificationInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: integer + /** Specifies the maximum number of feature importance values per document. */ + num_top_feature_importance_values?: integer + /** Specifies the type of the predicted field to write. Acceptable values are: string, number, boolean. When boolean is provided 1.0 is transformed to true and 0.0 to false. */ + prediction_field_type?: string + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. 
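// Hypothetical MlChunkingConfig values (not from the original diff) for the two common
// cases described above: dynamic chunking, and fixed windows for aggregated datafeeds.
const autoChunking: MlChunkingConfig = { mode: 'auto' }   // recommended without aggregations
const manualChunking: MlChunkingConfig = {
  mode: 'manual',
  time_span: '3h'   // each search covers a fixed three-hour window
}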
*/ + results_field?: string + /** Specifies the field to which the top classes are written. Defaults to top_classes. */ + top_classes_results_field?: string +} + +export interface MlCommonTokenizationConfig { + /** Should the tokenizer lower case the text */ + do_lower_case?: boolean + /** Maximum input sequence length for the model */ + max_sequence_length?: integer + /** Tokenization spanning options. Special value of -1 indicates no spanning takes place */ + span?: integer + /** Should tokenization input be automatically truncated before sending to the model for inference */ + truncate?: MlTokenizationTruncate + /** Is tokenization completed with special tokens */ + with_special_tokens?: boolean +} + +export type MlConditionOperator = 'gt' | 'gte' | 'lt' | 'lte' + +export type MlCustomSettings = any + +export interface MlDataCounts { + bucket_count: long + earliest_record_timestamp?: long + empty_bucket_count: long + input_bytes: long + input_field_count: long + input_record_count: long + invalid_date_count: long + job_id: Id + last_data_time?: long + latest_empty_bucket_timestamp?: long + latest_record_timestamp?: long + latest_sparse_bucket_timestamp?: long + latest_bucket_timestamp?: long + log_time?: long + missing_field_count: long + out_of_order_timestamp_count: long + processed_field_count: long + processed_record_count: long + sparse_bucket_count: long +} + +export interface MlDataDescription { + /** Only JSON format is supported at this time. */ + format?: string + /** The name of the field that contains the timestamp. */ + time_field?: Field + /** The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan 1970). The value `epoch_ms` indicates that time is measured in milliseconds since the epoch. The `epoch` and `epoch_ms` time formats accept either integer or real values. Custom patterns must conform to the Java DateTimeFormatter class. When you use date-time formatting patterns, it is recommended that you provide the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient to produce a complete timestamp, job creation fails. */ + time_format?: string + field_delimiter?: string +} + +export interface MlDatafeed { + aggregations?: Record + /** @alias aggregations */ + aggs?: Record + /** The security privileges that the datafeed uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the datafeed, this property is omitted. */ + authorization?: MlDatafeedAuthorization + chunking_config?: MlChunkingConfig + datafeed_id: Id + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. 
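// A hypothetical MlDataDescription (not from the original diff) showing the
// `time_format` variants described above; field names are illustrative.
const epochSeconds: MlDataDescription = { time_field: 'timestamp', time_format: 'epoch' }
const customPattern: MlDataDescription = {
  time_field: '@timestamp',
  // full date, time, and zone, per the Java DateTimeFormatter recommendation above
  time_format: "yyyy-MM-dd'T'HH:mm:ssX"
}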
*/ + frequency?: Duration + indices: string[] + indexes?: string[] + job_id: Id + max_empty_searches?: integer + query: QueryDslQueryContainer + query_delay?: Duration + script_fields?: Record + scroll_size?: integer + delayed_data_check_config: MlDelayedDataCheckConfig + runtime_mappings?: MappingRuntimeFields + indices_options?: IndicesOptions +} + +export interface MlDatafeedAuthorization { + /** If an API key was used for the most recent update to the datafeed, its name and identifier are listed in the response. */ + api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the datafeed, its roles at the time of the update are listed in the response. */ + roles?: string[] + /** If a service account was used for the most recent update to the datafeed, the account name is listed in the response. */ + service_account?: string +} + +export interface MlDatafeedConfig { + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ + aggregations?: Record + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + * @alias aggregations */ + aggs?: Record + /** Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated and is an advanced configuration option. */ + chunking_config?: MlChunkingConfig + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. The default value is the job identifier. */ + datafeed_id?: Id + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` option is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ + delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ + frequency?: Duration + /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + indices?: Indices + /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. + * @alias indices */ + indexes?: Indices + /** Specifies index expansion options that are used during search. 
*/ + indices_options?: IndicesOptions + job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period) then it will automatically stop itself and close its associated job after this many real-time searches that return no documents. In other words, it will stop after `frequency` times `max_empty_searches` of real-time operation. If not set then a datafeed with no end time that sees no data will remain started until it is explicitly stopped. */ + max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. */ + query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ + query_delay?: Duration + /** Specifies runtime fields for the datafeed search. */ + runtime_mappings?: MappingRuntimeFields + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ + script_fields?: Record + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ + scroll_size?: integer +} + +export interface MlDatafeedRunningState { + /** Indicates if the datafeed is "real-time"; meaning that the datafeed has no configured `end` time. */ + real_time_configured: boolean + /** Indicates whether the datafeed has finished running on the available past data. + * For datafeeds without a configured `end` time, this means that the datafeed is now running on "real-time" data. */ + real_time_running: boolean + /** Provides the latest time interval the datafeed has searched. */ + search_interval?: MlRunningStateSearchInterval +} + +export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' + +export interface MlDatafeedStats { + /** For started datafeeds only, contains messages relating to the selection of a node. */ + assignment_explanation?: string + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ + datafeed_id: Id + /** For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + node?: MlDiscoveryNodeCompact + /** The status of the datafeed, which can be one of the following values: `starting`, `started`, `stopping`, `stopped`. */ + state: MlDatafeedState + /** An object that provides statistical information about timing aspect of this datafeed. */ + timing_stats?: MlDatafeedTimingStats + /** An object containing the running state for this datafeed. + * It is only provided if the datafeed is started. 
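// A minimal hypothetical MlDatafeedConfig (not from the original diff) tying together
// the fields documented above; index, job, and datafeed names are invented.
const datafeedSketch: MlDatafeedConfig = {
  datafeed_id: 'datafeed-web-logs',
  job_id: 'web-logs-job',
  indices: ['web-logs-*'],
  query: { match_all: {} },   // passed verbatim to the Elasticsearch search body
  query_delay: '90s',         // allow for ingest lag before querying each window
  scroll_size: 1000,
  delayed_data_check_config: { enabled: true, check_window: '2h' }
}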
*/ + running_state?: MlDatafeedRunningState +} + +export interface MlDatafeedTimingStats { + /** The number of buckets processed. */ + bucket_count: long + /** The exponential average search time per hour, in milliseconds. */ + exponential_average_search_time_per_hour_ms: DurationValue + exponential_average_calculation_context?: MlExponentialAverageCalculationContext + /** Identifier for the anomaly detection job. */ + job_id: Id + /** The number of searches run by the datafeed. */ + search_count: long + /** The total time the datafeed spent searching, in milliseconds. */ + total_search_time_ms: DurationValue + /** The average search time per bucket, in milliseconds. */ + average_search_time_per_bucket_ms?: DurationValue +} + +export interface MlDataframeAnalysis { + /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This parameter affects loss calculations by acting as a multiplier of the tree depth. Higher alpha values result in shallower trees and faster training times. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to zero. */ + alpha?: double + /** Defines which field of the document is to be predicted. It must match one of the fields in the index being used to train. If this field is missing from a document, then that document will not be used for training, but a prediction with the trained model will be generated for it. It is also known as continuous target variable. + * For classification analysis, the data type of the field must be numeric (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or `boolean`. There must be no more than 30 different values in this field. + * For regression analysis, the data type of the field must be numeric. */ + dependent_variable: string + /** Advanced configuration option. Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. A small value results in the use of a small fraction of the data. If this value is set to be less than 1, accuracy typically improves. However, too small a value may result in poor convergence for the ensemble and so require more trees. By default, this value is calculated during hyperparameter optimization. It must be greater than zero and less than or equal to 1. */ + downsample_factor?: double + /** Advanced configuration option. Specifies whether the training process should finish if it is not finding any better performing models. If disabled, the training process can take significantly longer and the chance of finding a better performing model is unremarkable. */ + early_stopping_enabled?: boolean + /** Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests which have a better generalization error. However, larger forests cause slower training. By default, this value is calculated during hyperparameter optimization. It must be a value between 0.001 and 1. */ + eta?: double + /** Advanced configuration option. Specifies the rate at which `eta` increases for each new tree that is added to the forest. For example, a rate of 1.05 increases `eta` by 5% for each extra tree. By default, this value is calculated during hyperparameter optimization. It must be between 0.5 and 2. */ + eta_growth_rate_per_tree?: double + /** Advanced configuration option. 
Defines the fraction of features that will be used when selecting a random bag for each candidate split. By default, this value is calculated during hyperparameter optimization. */ + feature_bag_fraction?: double + /** Advanced configuration option. A collection of feature preprocessors that modify one or more included fields. The analysis uses the resulting one or more features instead of the original document field. However, these features are ephemeral; they are not stored in the destination index. Multiple `feature_processors` entries can refer to the same document fields. Automatic categorical feature encoding still occurs for the fields that are unprocessed by a custom processor or that have categorical values. Use this property only if you want to override the automatic feature encoding of the specified fields. */ + feature_processors?: MlDataframeAnalysisFeatureProcessor[] + /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies a linear penalty associated with the size of individual trees in the forest. A high gamma value causes training to prefer small trees. A small gamma value results in larger individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ + gamma?: double + /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. A high lambda value causes training to favor small leaf weights. This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. A small lambda value results in large individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ + lambda?: double + /** Advanced configuration option. A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. By default, this value is calculated during hyperparameter optimization. */ + max_optimization_rounds_per_hyperparameter?: integer + /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */ + max_trees?: integer + /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. + * @alias max_trees */ + maximum_number_trees?: integer + /** Advanced configuration option. Specifies the maximum number of feature importance values per document to return. By default, no feature importance calculation occurs. */ + num_top_feature_importance_values?: integer + /** Defines the name of the prediction field in the results. Defaults to `_prediction`. */ + prediction_field_name?: Field + /** Defines the seed for the random generator that is used to pick training data. By default, it is randomly generated. 
Set it to a specific value to use the same training data each time you start a job (assuming other related parameters such as `source` and `analyzed_fields` are the same). */ + randomize_seed?: double + /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0. */ + soft_tree_depth_limit?: integer + /** Advanced configuration option. This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0.01. */ + soft_tree_depth_tolerance?: double + /** Defines what percentage of the eligible documents will be used for training. Documents that are ignored by the analysis (for example those that contain arrays with more than one value) won’t be included in the calculation for used percentage. */ + training_percent?: Percentage +} + +export interface MlDataframeAnalysisAnalyzedFields { + /** An array of strings that defines the fields that will be included in the analysis. */ + includes?: string[] + /** An array of strings that defines the fields that will be excluded from the analysis. You do not need to add fields with unsupported data types to excludes; these fields are excluded from the analysis automatically. */ + excludes?: string[] +} + +export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis { + class_assignment_objective?: string + /** Defines the number of categories for which the predicted probabilities are reported. It must be non-negative or -1. If it is -1 or greater than the total number of categories, probabilities are reported for all categories; if you have a large number of categories, there could be a significant effect on the size of your destination index. NOTE: To use the AUC ROC evaluation method, `num_top_classes` must be set to -1 or a value greater than or equal to the total number of categories. */ + num_top_classes?: integer +} + +export interface MlDataframeAnalysisContainer { + /** The configuration information necessary to perform classification. */ + classification?: MlDataframeAnalysisClassification + /** The configuration information necessary to perform outlier detection. NOTE: Advanced parameters are for fine-tuning outlier detection analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ + outlier_detection?: MlDataframeAnalysisOutlierDetection + /** The configuration information necessary to perform regression. NOTE: Advanced parameters are for fine-tuning regression analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ + regression?: MlDataframeAnalysisRegression +} + +export interface MlDataframeAnalysisFeatureProcessor { + /** The configuration information necessary to perform frequency encoding.
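// A hypothetical MlDataframeAnalysisContainer (not from the original diff) configured
// for classification, using only fields documented above; names and values invented.
const classificationSketch: MlDataframeAnalysisContainer = {
  classification: {
    dependent_variable: 'churned',   // categorical target; at most 30 distinct values
    num_top_classes: -1,             // report probabilities for all classes (needed for AUC ROC)
    training_percent: 80,            // hold back 20% of eligible documents for testing
    randomize_seed: 42               // reproducible training-data selection
  }
}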
*/ + frequency_encoding?: MlDataframeAnalysisFeatureProcessorFrequencyEncoding + /** The configuration information necessary to perform multi encoding. It allows multiple processors to be chained together. This way the output of a processor can then be passed to another as an input. */ + multi_encoding?: MlDataframeAnalysisFeatureProcessorMultiEncoding + /** The configuration information necessary to perform n-gram encoding. Features created by this encoder have the following name format: `<feature_prefix>.<ngram><string position>`. For example, if the feature_prefix is f, the feature name for the second unigram in a string is f.11. */ + n_gram_encoding?: MlDataframeAnalysisFeatureProcessorNGramEncoding + /** The configuration information necessary to perform one hot encoding. */ + one_hot_encoding?: MlDataframeAnalysisFeatureProcessorOneHotEncoding + /** The configuration information necessary to perform target mean encoding. */ + target_mean_encoding?: MlDataframeAnalysisFeatureProcessorTargetMeanEncoding +} + +export interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding { + /** The resulting feature name. */ + feature_name: Name + field: Field + /** The resulting frequency map for the field value. If the field value is missing from the frequency_map, the resulting value is 0. */ + frequency_map: Record +} + +export interface MlDataframeAnalysisFeatureProcessorMultiEncoding { + /** The ordered array of custom processors to execute. Must be more than 1. */ + processors: integer[] +} + +export interface MlDataframeAnalysisFeatureProcessorNGramEncoding { + /** The feature name prefix. Defaults to ngram__. */ + feature_prefix?: string + /** The name of the text field to encode. */ + field: Field + /** Specifies the length of the n-gram substring. Defaults to 50. Must be greater than 0. */ + length?: integer + /** Specifies which n-grams to gather. It’s an array of integer values where the minimum value is 1 and the maximum value is 5. */ + n_grams: integer[] + /** Specifies the zero-indexed start of the n-gram substring. Negative values are allowed for encoding n-grams of string suffixes. Defaults to 0. */ + start?: integer + custom?: boolean +} + +export interface MlDataframeAnalysisFeatureProcessorOneHotEncoding { + /** The name of the field to encode. */ + field: Field + /** The one hot map mapping the field value with the column name. */ + hot_map: string +} + +export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { + /** The default value if field value is not found in the target_map. */ + default_value: integer + /** The resulting feature name. */ + feature_name: Name + /** The name of the field to encode. */ + field: Field + /** The field value to target mean transition map. */ + target_map: Record +} + +export interface MlDataframeAnalysisOutlierDetection { + /** Specifies whether the feature influence calculation is enabled. */ + compute_feature_influence?: boolean + /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1. */ + feature_influence_threshold?: double + /** The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score.
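// Hypothetical feature processors (not from the original diff) matching the encodings
// documented above; field names and map contents are invented.
const featureProcessorSketch: MlDataframeAnalysisFeatureProcessor[] = [
  { one_hot_encoding: { field: 'browser', hot_map: '{"chrome":"is_chrome","firefox":"is_firefox"}' } },
  { n_gram_encoding: { field: 'user_agent', feature_prefix: 'f', n_grams: [1, 2], start: 0, length: 20 } }
]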
*/ + method?: string + /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. When the value is not set, different values are used for different ensemble members. This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ + n_neighbors?: integer + /** The proportion of the data set that is assumed to be outlying prior to outlier detection. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ + outlier_fraction?: double + /** If true, the following operation is performed on the columns before computing outlier scores: `(x_i - mean(x_i)) / sd(x_i)`. */ + standardization_enabled?: boolean +} + +export interface MlDataframeAnalysisRegression extends MlDataframeAnalysis { + /** The loss function used during regression. Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), `huber` (Pseudo-Huber loss). */ + loss_function?: string + /** A positive number that is used as a parameter to the `loss_function`. */ + loss_function_parameter?: double +} + +export interface MlDataframeAnalytics { + /** An object containing information about the analysis job. */ + analysis_stats?: MlDataframeAnalyticsStatsContainer + /** For running jobs only, contains messages relating to the selection of a node to run the job. */ + assignment_explanation?: string + /** An object that provides counts for the quantity of documents skipped, used in training, or available for testing. */ + data_counts: MlDataframeAnalyticsStatsDataCounts + /** The unique identifier of the data frame analytics job. */ + id: Id + /** An object describing memory usage of the analytics. It is present only after the job is started and memory usage is reported. */ + memory_usage: MlDataframeAnalyticsStatsMemoryUsage + /** Contains properties for the node that runs the job. This information is available only for running jobs. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + node?: NodeAttributes + /** The progress report of the data frame analytics job by phase. */ + progress: MlDataframeAnalyticsStatsProgress[] + /** The status of the data frame analytics job, which can be one of the following values: failed, started, starting, stopping, stopped. */ + state: MlDataframeState +} + +export interface MlDataframeAnalyticsAuthorization { + /** If an API key was used for the most recent update to the job, its name and identifier are listed in the response. */ + api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the job, its roles at the time of the update are listed in the response. */ + roles?: string[] + /** If a service account was used for the most recent update to the job, the account name is listed in the response. */ + service_account?: string +} + +export interface MlDataframeAnalyticsDestination { + /** Defines the destination index to store the results of the data frame analytics job. */ + index: IndexName + /** Defines the name of the field in which to store the results of the analysis. Defaults to `ml`. */ + results_field?: Field +} + +export interface MlDataframeAnalyticsFieldSelection { + /** Whether the field is selected to be included in the analysis. */ + is_included: boolean + /** Whether the field is required. */ + is_required: boolean + /** The feature type of this field for the analysis. 
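// A hypothetical MlDataframeAnalysisOutlierDetection (not from the original diff)
// illustrating the parameters above; as noted, override n_neighbors only with reason.
const outlierSketch: MlDataframeAnalysisOutlierDetection = {
  method: 'ensemble',                // combine lof, ldof, and kNN-based scores
  compute_feature_influence: true,
  feature_influence_threshold: 0.1,  // only explain documents scoring above 0.1
  outlier_fraction: 0.05,            // assume ~5% of the data set is outlying
  standardization_enabled: true      // (x_i - mean(x_i)) / sd(x_i) before scoring
}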
May be categorical or numerical. */ + feature_type?: string + /** The mapping types of the field. */ + mapping_types: string[] + /** The field name. */ + name: Field + /** The reason a field is not selected to be included in the analysis. */ + reason?: string +} + +export interface MlDataframeAnalyticsMemoryEstimation { + /** Estimated memory usage under the assumption that overflowing to disk is allowed during data frame analytics. expected_memory_with_disk is usually smaller than expected_memory_without_disk as using disk allows limiting the main memory needed to perform data frame analytics. */ + expected_memory_with_disk: string + /** Estimated memory usage under the assumption that the whole data frame analytics should happen in memory (i.e. without overflowing to disk). */ + expected_memory_without_disk: string +} + +export interface MlDataframeAnalyticsSource { + /** Index or indices on which to perform the analysis. It can be a single index or index pattern as well as an array of indices or patterns. NOTE: If your source indices contain documents with the same IDs, only the document that is indexed last appears in the destination index. */ + index: Indices + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. By default, this property has the following value: {"match_all": {}}. */ + query?: QueryDslQueryContainer + /** Definitions of runtime fields that will become part of the mapping of the destination index. */ + runtime_mappings?: MappingRuntimeFields + /** Specify `includes` and/or `excludes` patterns to select which fields will be present in the destination. Fields that are excluded cannot be included in the analysis. */ + _source?: MlDataframeAnalysisAnalyzedFields | string[] +} + +export interface MlDataframeAnalyticsStatsContainer { + /** An object containing information about the classification analysis job. */ + classification_stats?: MlDataframeAnalyticsStatsHyperparameters + /** An object containing information about the outlier detection job. */ + outlier_detection_stats?: MlDataframeAnalyticsStatsOutlierDetection + /** An object containing information about the regression analysis. */ + regression_stats?: MlDataframeAnalyticsStatsHyperparameters +} + +export interface MlDataframeAnalyticsStatsDataCounts { + /** The number of documents that are skipped during the analysis because they contained values that are not supported by the analysis. For example, outlier detection does not support missing fields so it skips documents with missing fields. Likewise, all types of analysis skip documents that contain arrays with more than one element. */ + skipped_docs_count: integer + /** The number of documents that are not used for training the model and can be used for testing. */ + test_docs_count: integer + /** The number of documents that are used for training the model. */ + training_docs_count: integer +} + +export interface MlDataframeAnalyticsStatsHyperparameters { + /** An object containing the parameters of the classification analysis job. */ + hyperparameters: MlHyperparameters + /** The number of iterations on the analysis. */ + iteration: integer + /** The timestamp when the statistics were reported in milliseconds since the epoch. */ + timestamp: EpochTime + /** An object containing time statistics about the data frame analytics job.
*/ + timing_stats: MlTimingStats + /** An object containing information about validation loss. */ + validation_loss: MlValidationLoss +} + +export interface MlDataframeAnalyticsStatsMemoryUsage { + /** This value is present when the status is hard_limit and it is a new estimate of how much memory the job needs. */ + memory_reestimate_bytes?: long + /** The number of bytes used at the highest peak of memory usage. */ + peak_usage_bytes: long + /** The memory usage status. */ + status: string + /** The timestamp when memory usage was calculated. */ + timestamp?: EpochTime +} + +export interface MlDataframeAnalyticsStatsOutlierDetection { + /** The list of job parameters specified by the user or determined by algorithmic heuristics. */ + parameters: MlOutlierDetectionParameters + /** The timestamp when the statistics were reported in milliseconds since the epoch. */ + timestamp: EpochTime + /** An object containing time statistics about the data frame analytics job. */ + timing_stats: MlTimingStats +} + +export interface MlDataframeAnalyticsStatsProgress { + /** Defines the phase of the data frame analytics job. */ + phase: string + /** The progress that the data frame analytics job has made expressed in percentage. */ + progress_percent: integer +} + +export interface MlDataframeAnalyticsSummary { + allow_lazy_start?: boolean + analysis: MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + /** The security privileges that the job uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the job, this property is omitted. */ + authorization?: MlDataframeAnalyticsAuthorization + create_time?: EpochTime + description?: string + dest: MlDataframeAnalyticsDestination + id: Id + max_num_threads?: integer + model_memory_limit?: string + source: MlDataframeAnalyticsSource + version?: VersionString + _meta?: Metadata +} + +export interface MlDataframeEvaluationClassification { + /** The field of the index which contains the ground truth. The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ + actual_field: Field + /** The field in the index which contains the predicted value, in other words the results of the classification analysis. */ + predicted_field?: Field + /** The field of the index which is an array of documents of the form { "class_name": XXX, "class_probability": YYY }. This field must be defined as nested in the mappings. */ + top_classes_field?: Field + /** Specifies the metrics that are used for the evaluation. */ + metrics?: MlDataframeEvaluationClassificationMetrics +} + +export interface MlDataframeEvaluationClassificationMetrics extends MlDataframeEvaluationMetrics { + /** Accuracy of predictions (per-class and overall). */ + accuracy?: Record + /** Multiclass confusion matrix. */ + multiclass_confusion_matrix?: Record +} + +export interface MlDataframeEvaluationClassificationMetricsAucRoc { + /** Name of the only class that is treated as positive during AUC ROC calculation. Other classes are treated as negative ("one-vs-all" strategy). All the evaluated documents must have class_name in the list of their top classes. */ + class_name?: Name + /** Whether or not the curve should be returned in addition to the score. Default value is false. 
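// A hypothetical MlDataframeEvaluationClassification body (not from the original diff)
// using the fields documented above; field names and the class label are invented.
const evaluationSketch: MlDataframeEvaluationClassification = {
  actual_field: 'churned',
  predicted_field: 'ml.churned_prediction',
  top_classes_field: 'ml.top_classes',   // must be mapped as nested
  metrics: {
    auc_roc: { class_name: 'true', include_curve: true }   // one-vs-all for class "true"
  }
}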
*/ + include_curve?: boolean +} + +export interface MlDataframeEvaluationContainer { + /** Classification evaluation evaluates the results of a classification analysis which outputs a prediction that identifies to which of the classes each document belongs. */ + classification?: MlDataframeEvaluationClassification + /** Outlier detection evaluates the results of an outlier detection analysis which outputs the probability that each document is an outlier. */ + outlier_detection?: MlDataframeEvaluationOutlierDetection + /** Regression evaluation evaluates the results of a regression analysis which outputs a prediction of values. */ + regression?: MlDataframeEvaluationRegression +} + +export interface MlDataframeEvaluationMetrics { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. It is calculated for a specific class (provided as "class_name") treated as positive. */ + auc_roc?: MlDataframeEvaluationClassificationMetricsAucRoc + /** Precision of predictions (per-class and average). */ + precision?: Record + /** Recall of predictions (per-class and average). */ + recall?: Record +} + +export interface MlDataframeEvaluationOutlierDetection { + /** The field of the index which contains the ground truth. The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ + actual_field: Field + /** The field of the index that defines the probability of whether the item belongs to the class in question or not. It’s the field that contains the results of the analysis. */ + predicted_probability_field: Field + /** Specifies the metrics that are used for the evaluation. */ + metrics?: MlDataframeEvaluationOutlierDetectionMetrics +} + +export interface MlDataframeEvaluationOutlierDetectionMetrics extends MlDataframeEvaluationMetrics { + /** The confusion matrix of the predictions, computed at the given probability thresholds (true positives, false positives, true negatives, and false negatives per threshold). */ + confusion_matrix?: Record +} + +export interface MlDataframeEvaluationRegression { + /** The field of the index which contains the ground truth. The data type of this field must be numerical. */ + actual_field: Field + /** The field in the index that contains the predicted value, in other words the results of the regression analysis. */ + predicted_field: Field + /** Specifies the metrics that are used for the evaluation. For more information on mse, msle, and huber, consult the Jupyter notebook on regression loss functions. */ + metrics?: MlDataframeEvaluationRegressionMetrics +} + +export interface MlDataframeEvaluationRegressionMetrics { + /** Average squared difference between the predicted values and the actual (ground truth) value. For more information, read this wiki article. */ + mse?: Record + /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (ground truth) value. */ + msle?: MlDataframeEvaluationRegressionMetricsMsle + /** Pseudo-Huber loss function. */ + huber?: MlDataframeEvaluationRegressionMetricsHuber + /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */ + r_squared?: Record +} + +export interface MlDataframeEvaluationRegressionMetricsHuber { + /** Approximates 1/2 (prediction - actual)^2 for values much less than delta and approximates a straight line with slope delta for values much larger than delta. Defaults to 1. Delta needs to be greater than 0.
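// An illustrative TypeScript sketch (not the client's own code) of the Pseudo-Huber
// loss, to make the delta behaviour above concrete: roughly (1/2)(prediction - actual)^2
// for small residuals, and a line of slope delta for large ones.
function pseudoHuber (prediction: number, actual: number, delta = 1): number {
  const r = prediction - actual
  return delta * delta * (Math.sqrt(1 + (r / delta) ** 2) - 1)
}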
*/ + delta?: double +} + +export interface MlDataframeEvaluationRegressionMetricsMsle { + /** Defines the transition point at which you switch from minimizing quadratic error to minimizing quadratic log error. Defaults to 1. */ + offset?: double +} + +export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed' + +export interface MlDelayedDataCheckConfig { + /** The window of time that is searched for late data. This window of time ends with the latest finalized bucket. + * It defaults to null, which causes an appropriate `check_window` to be calculated when the real-time datafeed runs. + * In particular, the default `check_window` span calculation is based on the maximum of `2h` or `8 * bucket_span`. */ + check_window?: Duration + /** Specifies whether the datafeed periodically checks for delayed data. */ + enabled: boolean +} + +export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_allocated' + +export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed' + +export interface MlDetectionRule { + /** The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined. */ + actions?: MlRuleAction[] + /** An array of numeric conditions when the rule applies. A rule must either have a non-empty scope or at least one condition. Multiple conditions are combined together with a logical AND. */ + conditions?: MlRuleCondition[] + /** A scope of series where the rule applies. A rule must either have a non-empty scope or at least one condition. By default, the scope includes all series. Scoping is allowed for any of the fields that are also specified in `by_field_name`, `over_field_name`, or `partition_field_name`. */ + scope?: Record +} + +export interface MlDetector { + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ + by_field_name?: Field + /** Custom rules enable you to customize the way detectors operate. For example, a rule may dictate conditions under which results should be skipped. Kibana refers to custom rules as job rules. */ + custom_rules?: MlDetectionRule[] + /** A description of the detector. */ + detector_description?: string + /** A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. If you specify a value for this property, it is ignored. */ + detector_index?: integer + /** If set, frequent entities are excluded from influencing the anomaly results. Entities can be considered frequent over time or frequent in a population. If you are working with both over and by fields, you can set `exclude_frequent` to `all` for both fields, or to `by` or `over` for those specific fields. */ + exclude_frequent?: MlExcludeFrequent + /** The field that the detector uses in the function. If you use an event rate function such as count or rare, do not specify this field. The `field_name` cannot contain double quotes or backslashes. */ + field_name?: Field + /** The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, or `sum`. */ + function?: string + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. 
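// A hypothetical MlDetectionRule (not from the original diff) combining a condition and
// a scope, as described above. The filter ID and the scoped field name are invented,
// and the MlRuleCondition/MlRuleAction shapes are assumed from elsewhere in this file.
const ruleSketch: MlDetectionRule = {
  actions: ['skip_result'],
  conditions: [{ applies_to: 'actual', operator: 'lt', value: 100 }],   // ANDed together
  scope: { instance: { filter_id: 'safe-instances', filter_type: 'include' } }
}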
*/ + over_field_name?: Field + /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ + partition_field_name?: Field + /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ + use_null?: boolean +} + +export interface MlDetectorRead { + /** The field used to split the data. + * In particular, this property is used for analyzing the splits with respect to their own history. + * It is used for finding unusual values in the context of the split. */ + by_field_name?: Field + /** An array of custom rule objects, which enable you to customize the way detectors operate. + * For example, a rule may dictate to the detector conditions under which results should be skipped. + * Kibana refers to custom rules as job rules. */ + custom_rules?: MlDetectionRule[] + /** A description of the detector. */ + detector_description?: string + /** A unique identifier for the detector. + * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ + detector_index?: integer + /** Contains one of the following values: `all`, `none`, `by`, or `over`. + * If set, frequent entities are excluded from influencing the anomaly results. + * Entities can be considered frequent over time or frequent in a population. + * If you are working with both over and by fields, then you can set `exclude_frequent` to all for both fields, or to `by` or `over` for those specific fields. */ + exclude_frequent?: MlExcludeFrequent + /** The field that the detector uses in the function. + * If you use an event rate function such as `count` or `rare`, do not specify this field. */ + field_name?: Field + /** The analysis function that is used. + * For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. */ + function: string + /** The field used to split the data. + * In particular, this property is used for analyzing the splits with respect to the history of all splits. + * It is used for finding unusual values in the population of all splits. */ + over_field_name?: Field + /** The field used to segment the analysis. + * When you use this property, you have completely independent baselines for each value of this field. */ + partition_field_name?: Field + /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ + use_null?: boolean +} + +export interface MlDetectorUpdate { + /** A unique identifier for the detector. + * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ + detector_index: integer + /** A description of the detector. */ + description?: string + /** An array of custom rule objects, which enable you to customize the way detectors operate. + * For example, a rule may dictate to the detector conditions under which results should be skipped. + * Kibana refers to custom rules as job rules. 
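+ * @example + * // Hypothetical job rule (sketch only): skip results when the actual value is below 10. + * // custom_rules: [{ actions: ['skip_result'], conditions: [{ applies_to: 'actual', operator: 'lt', value: 10 }] }]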
*/ + custom_rules?: MlDetectionRule[] +} + +export type MlDiscoveryNode = Partial<Record<Id, MlDiscoveryNodeContent>> + +export interface MlDiscoveryNodeCompact { + name: Name + ephemeral_id: Id + id: Id + transport_address: TransportAddress + attributes: Record<string, string> +} + +export interface MlDiscoveryNodeContent { + name?: Name + ephemeral_id: Id + transport_address: TransportAddress + external_id: string + attributes: Record<string, string> + roles: string[] + version: VersionString + min_index_version: integer + max_index_version: integer +} + +export type MlExcludeFrequent = 'all' | 'none' | 'by' | 'over' + +export interface MlExponentialAverageCalculationContext { + incremental_metric_value_ms: DurationValue + latest_timestamp?: EpochTime + previous_exponential_average_ms?: DurationValue +} + +export type MlFeatureExtractor = MlQueryFeatureExtractor + +export interface MlFillMaskInferenceOptions { + /** The string/token which will be removed from incoming documents and replaced with the inference prediction(s). + * In a response, this field contains the mask token for the specified model/tokenizer. Each model and tokenizer + * has a predefined mask token which cannot be changed. Thus, it is recommended not to set this value in requests. + * However, if this field is present in a request, its value must match the predefined value for that model/tokenizer, + * otherwise the request will fail. */ + mask_token?: string + /** Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: integer + /** The tokenization options to update when inferring */ + tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + vocabulary?: MlVocabulary +} + +export interface MlFillMaskInferenceUpdateOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: integer + /** The tokenization options to update when inferring */ + tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} + +export interface MlFilter { + /** A description of the filter. */ + description?: string + /** A string that uniquely identifies a filter. */ + filter_id: Id + /** An array of strings which is the filter item list. */ + items: string[] +} + +export interface MlFilterRef { + /** The identifier for the filter. */ + filter_id: Id + /** If set to `include`, the rule applies for values in the filter. If set to `exclude`, the rule applies for values not in the filter. */ + filter_type?: MlFilterType +} + +export type MlFilterType = 'include' | 'exclude' + +export interface MlGeoResults { + /** The actual value for the bucket formatted as a `geo_point`. */ + actual_point?: string + /** The typical value for the bucket formatted as a `geo_point`. */ + typical_point?: string +} + +export interface MlHyperparameter { + /** A positive number showing how much the parameter influences the variation of the loss function. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ + absolute_importance?: double + /** Name of the hyperparameter. */ + name: Name + /** A number between 0 and 1 showing the proportion of influence on the variation of the loss function among all tuned hyperparameters.
For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ + relative_importance?: double + /** Indicates if the hyperparameter is specified by the user (true) or optimized (false). */ + supplied: boolean + /** The value of the hyperparameter, either optimized or specified by the user. */ + value: double +} + +export interface MlHyperparameters { + /** Advanced configuration option. + * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. + * This parameter affects loss calculations by acting as a multiplier of the tree depth. + * Higher alpha values result in shallower trees and faster training times. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to zero. */ + alpha?: double + /** Advanced configuration option. + * Regularization parameter to prevent overfitting on the training data set. + * Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. + * A high lambda value causes training to favor small leaf weights. + * This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. + * A small lambda value results in large individual trees and slower training. + * By default, this value is calculated during hyperparameter optimization. + * It must be a nonnegative value. */ + lambda?: double + /** Advanced configuration option. + * Regularization parameter to prevent overfitting on the training data set. + * Multiplies a linear penalty associated with the size of individual trees in the forest. + * A high gamma value causes training to prefer small trees. + * A small gamma value results in larger individual trees and slower training. + * By default, this value is calculated during hyperparameter optimization. + * It must be a nonnegative value. */ + gamma?: double + /** Advanced configuration option. + * The shrinkage applied to the weights. + * Smaller values result in larger forests which have a better generalization error. + * However, larger forests cause slower training. + * By default, this value is calculated during hyperparameter optimization. + * It must be a value between `0.001` and `1`. */ + eta?: double + /** Advanced configuration option. + * Specifies the rate at which `eta` increases for each new tree that is added to the forest. + * For example, a rate of 1.05 increases `eta` by 5% for each extra tree. + * By default, this value is calculated during hyperparameter optimization. + * It must be between `0.5` and `2`. */ + eta_growth_rate_per_tree?: double + /** Advanced configuration option. + * Defines the fraction of features that will be used when selecting a random bag for each candidate split. + * By default, this value is calculated during hyperparameter optimization. */ + feature_bag_fraction?: double + /** Advanced configuration option. + * Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. + * A small value results in the use of a small fraction of the data. + * If this value is set to be less than 1, accuracy typically improves. + * However, too small a value may result in poor convergence for the ensemble and so require more trees. + * By default, this value is calculated during hyperparameter optimization. 
+ * It must be greater than zero and less than or equal to 1. */ + downsample_factor?: double + /** If the algorithm fails to determine a non-trivial tree (more than a single leaf), this parameter determines how many of such consecutive failures are tolerated. + * Once the number of attempts exceeds the threshold, the forest training stops. */ + max_attempts_to_add_tree?: integer + /** Advanced configuration option. + * A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. + * The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. + * By default, this value is calculated during hyperparameter optimization. */ + max_optimization_rounds_per_hyperparameter?: integer + /** Advanced configuration option. + * Defines the maximum number of decision trees in the forest. + * The maximum value is 2000. + * By default, this value is calculated during hyperparameter optimization. */ + max_trees?: integer + /** The maximum number of folds for the cross-validation procedure. */ + num_folds?: integer + /** Determines the maximum number of splits for every feature that can occur in a decision tree when the tree is trained. */ + num_splits_per_feature?: integer + /** Advanced configuration option. + * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. + * This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to 0. */ + soft_tree_depth_limit?: integer + /** Advanced configuration option. + * This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to 0.01. */ + soft_tree_depth_tolerance?: double +} + +export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status' + +export interface MlInferenceConfigCreateContainer { + /** Regression configuration for inference. */ + regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ + classification?: MlClassificationInferenceOptions + /** Text classification configuration for inference. */ + text_classification?: MlTextClassificationInferenceOptions + /** Zeroshot classification configuration for inference. */ + zero_shot_classification?: MlZeroShotClassificationInferenceOptions + /** Fill mask configuration for inference. */ + fill_mask?: MlFillMaskInferenceOptions + learning_to_rank?: MlLearningToRankConfig + /** Named entity recognition configuration for inference. */ + ner?: MlNerInferenceOptions + /** Pass through configuration for inference. */ + pass_through?: MlPassThroughInferenceOptions + /** Text embedding configuration for inference. */ + text_embedding?: MlTextEmbeddingInferenceOptions + /** Text expansion configuration for inference. */ + text_expansion?: MlTextExpansionInferenceOptions + /** Question answering configuration for inference. 
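+ * @example + * // Illustrative sketch (assumed values): configure question answering inference with a capped answer length. + * // { question_answering: { max_answer_length: 15 } }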
*/ + question_answering?: MlQuestionAnsweringInferenceOptions +} + +export interface MlInferenceConfigUpdateContainer { + /** Regression configuration for inference. */ + regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ + classification?: MlClassificationInferenceOptions + /** Text classification configuration for inference. */ + text_classification?: MlTextClassificationInferenceUpdateOptions + /** Zeroshot classification configuration for inference. */ + zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions + /** Fill mask configuration for inference. */ + fill_mask?: MlFillMaskInferenceUpdateOptions + /** Named entity recognition configuration for inference. */ + ner?: MlNerInferenceUpdateOptions + /** Pass through configuration for inference. */ + pass_through?: MlPassThroughInferenceUpdateOptions + /** Text embedding configuration for inference. */ + text_embedding?: MlTextEmbeddingInferenceUpdateOptions + /** Text expansion configuration for inference. */ + text_expansion?: MlTextExpansionInferenceUpdateOptions + /** Question answering configuration for inference. */ + question_answering?: MlQuestionAnsweringInferenceUpdateOptions +} + +export interface MlInferenceResponseResult { + /** If the model is trained for named entity recognition (NER) tasks, the response contains the recognized entities. */ + entities?: MlTrainedModelEntities[] + /** Indicates whether the input text was truncated to meet the model's maximum sequence length limit. This property + * is present only when it is true. */ + is_truncated?: boolean + /** If the model is trained for a text classification or zero shot classification task, the response is the + * predicted class. + * For named entity recognition (NER) tasks, it contains the annotated text output. + * For fill mask tasks, it contains the top prediction for replacing the mask token. + * For text embedding tasks, it contains the raw numerical text embedding values. + * For regression models, it's a numerical value. + * For classification models, it may be an integer, double, boolean, or string, depending on the prediction type. */ + predicted_value?: MlPredictedValue | MlPredictedValue[] + /** For fill mask tasks, the response contains the input text sequence with the mask token replaced by the predicted + * value. */ + predicted_value_sequence?: string + /** Specifies a probability for the predicted value. */ + prediction_probability?: double + /** Specifies a confidence score for the predicted value. */ + prediction_score?: double + /** For fill mask, text classification, and zero shot classification tasks, the response contains a list of top + * class entries. */ + top_classes?: MlTopClassEntry[] + /** If the request failed, the response contains the reason for the failure. */ + warning?: string + /** The feature importance for the inference results. Relevant only for classification or regression models */ + feature_importance?: MlTrainedModelInferenceFeatureImportance[] +} + +export interface MlInfluence { + influencer_field_name: string + influencer_field_values: string[] +} + +export interface MlInfluencer { + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ + bucket_span: DurationValue + /** A normalized score between 0-100, which is based on the probability of the influencer in this bucket aggregated + * across detectors.
Unlike `initial_influencer_score`, this value is updated by a re-normalization process as new + * data is analyzed. */ + influencer_score: double + /** The field name of the influencer. */ + influencer_field_name: Field + /** The entity that influenced, contributed to, or was to blame for the anomaly. */ + influencer_field_value: string + /** A normalized score between 0-100, which is based on the probability of the influencer aggregated across detectors. + * This is the initial value that was calculated at the time the bucket was processed. */ + initial_influencer_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ + is_interim: boolean + /** Identifier for the anomaly detection job. */ + job_id: Id + /** The probability that the influencer has this behavior, in the range 0 to 1. This value can be held to a high + * precision of over 300 decimal places, so the `influencer_score` is provided as a human-readable and friendly + * interpretation of this value. */ + probability: double + /** Internal. This value is always set to `influencer`. */ + result_type: string + /** The start time of the bucket for which these results were calculated. */ + timestamp: EpochTime + /** Additional influencer properties are added, depending on the fields being analyzed. For example, if it’s + * analyzing `user_name` as an influencer, a field `user_name` is added to the result document. This + * information enables you to filter the anomaly results more easily. */ + foo?: string +} + +export interface MlJob { + /** Advanced configuration option. + * Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ + allow_lazy_open: boolean + /** The analysis configuration, which specifies how to analyze the data. + * After you create a job, you cannot change the analysis configuration; all the properties are informational. */ + analysis_config: MlAnalysisConfig + /** Limits can be applied for the resources required to hold the mathematical models in memory. + * These limits are approximate and can be set per job. + * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ + analysis_limits?: MlAnalysisLimits + /** Advanced configuration option. + * The time between each periodic persistence of the model. + * The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. + * The smallest allowed value is 1 hour. */ + background_persist_interval?: Duration + blocked?: MlJobBlocked + create_time?: DateTime + /** Advanced configuration option. + * Contains custom metadata about the job. */ + custom_settings?: MlCustomSettings + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies a period of time (in days) after which only the first snapshot per day is retained. + * This period is relative to the timestamp of the most recent snapshot for this job. + * Valid values range from 0 to `model_snapshot_retention_days`. */ + daily_model_snapshot_retention_after_days?: long + /** The data description defines the format of the input data when you send data to the job by using the post data API. + * Note that when configuring a datafeed, these properties are automatically set. + * When data is received via the post data API, it is not stored in Elasticsearch.
+ * Only the results for anomaly detection are retained. */ + data_description: MlDataDescription + /** The datafeed, which retrieves data from Elasticsearch for analysis by the job. + * You can associate only one datafeed with each anomaly detection job. */ + datafeed_config?: MlDatafeed + /** Indicates that the process of deleting the job is in progress but not yet completed. + * It is only reported when `true`. */ + deleting?: boolean + /** A description of the job. */ + description?: string + /** If the job closed or failed, this is the time the job finished, otherwise it is `null`. + * This property is informational; you cannot change its value. */ + finished_time?: DateTime + /** A list of job groups. + * A job can belong to no groups or many. */ + groups?: string[] + /** Identifier for the anomaly detection job. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ + job_id: Id + /** Reserved for future use, currently set to `anomaly_detector`. */ + job_type?: string + /** The machine learning configuration version number at which the job was created. */ + job_version?: VersionString + /** This advanced configuration option stores model information along with the results. + * It provides a more detailed view into anomaly detection. + * Model plot provides a simplified and indicative view of the model and its bounds. */ + model_plot_config?: MlModelPlotConfig + model_snapshot_id?: Id + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies the maximum period of time (in days) that snapshots are retained. + * This period is relative to the timestamp of the most recent snapshot for this job. + * By default, snapshots ten days older than the newest snapshot are deleted. */ + model_snapshot_retention_days: long + /** Advanced configuration option. + * The period over which adjustments to the score are applied, as new data is seen. + * The default value is the longer of 30 days or 100 `bucket_spans`. */ + renormalization_window_days?: long + /** A text string that affects the name of the machine learning results index. + * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ + results_index_name: IndexName + /** Advanced configuration option. + * The period of time (in days) that results are retained. + * Age is calculated relative to the timestamp of the latest bucket result. + * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. + * The default value is null, which means all results are retained. + * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. + * Annotations added by users are retained forever. */ + results_retention_days?: long +} + +export interface MlJobBlocked { + reason: MlJobBlockedReason + task_id?: TaskId +} + +export type MlJobBlockedReason = 'delete' | 'reset' | 'revert' + +export interface MlJobConfig { + /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ + allow_lazy_open?: boolean + /** The analysis configuration, which specifies how to analyze the data.
+ * After you create a job, you cannot change the analysis configuration; all the properties are informational. */ + analysis_config: MlAnalysisConfig + /** Limits can be applied for the resources required to hold the mathematical models in memory. + * These limits are approximate and can be set per job. + * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ + analysis_limits?: MlAnalysisLimits + /** Advanced configuration option. + * The time between each periodic persistence of the model. + * The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. + * The smallest allowed value is 1 hour. */ + background_persist_interval?: Duration + /** Advanced configuration option. + * Contains custom metadata about the job. */ + custom_settings?: MlCustomSettings + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies a period of time (in days) after which only the first snapshot per day is retained. + * This period is relative to the timestamp of the most recent snapshot for this job. */ + daily_model_snapshot_retention_after_days?: long + /** The data description defines the format of the input data when you send data to the job by using the post data API. + * Note that when configuring a datafeed, these properties are automatically set. */ + data_description: MlDataDescription + /** The datafeed, which retrieves data from Elasticsearch for analysis by the job. + * You can associate only one datafeed with each anomaly detection job. */ + datafeed_config?: MlDatafeedConfig + /** A description of the job. */ + description?: string + /** A list of job groups. A job can belong to no groups or many. */ + groups?: string[] + /** Identifier for the anomaly detection job. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ + job_id?: Id + /** Reserved for future use, currently set to `anomaly_detector`. */ + job_type?: string + /** This advanced configuration option stores model information along with the results. + * It provides a more detailed view into anomaly detection. + * Model plot provides a simplified and indicative view of the model and its bounds. */ + model_plot_config?: MlModelPlotConfig + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies the maximum period of time (in days) that snapshots are retained. + * This period is relative to the timestamp of the most recent snapshot for this job. + * The default value is `10`, which means snapshots ten days older than the newest snapshot are deleted. */ + model_snapshot_retention_days?: long + /** Advanced configuration option. + * The period over which adjustments to the score are applied, as new data is seen. + * The default value is the longer of 30 days or 100 `bucket_spans`. */ + renormalization_window_days?: long + /** A text string that affects the name of the machine learning results index. + * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ + results_index_name?: IndexName + /** Advanced configuration option. + * The period of time (in days) that results are retained. + * Age is calculated relative to the timestamp of the latest bucket result.
+ * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. + * The default value is null, which means all results are retained. + * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. + * Annotations added by users are retained forever. */ + results_retention_days?: long +} + +export interface MlJobForecastStatistics { + memory_bytes?: MlJobStatistics + processing_time_ms?: MlJobStatistics + records?: MlJobStatistics + status?: Record + total: long + forecasted_jobs: integer +} + +export type MlJobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening' + +export interface MlJobStatistics { + avg: double + max: double + min: double + total: double +} + +export interface MlJobStats { + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ + assignment_explanation?: string + /** An object that describes the quantity of input to the job and any related error counts. + * The `data_count` values are cumulative for the lifetime of a job. + * If a model snapshot is reverted or old results are deleted, the job counts are not reset. */ + data_counts: MlDataCounts + /** An object that provides statistical information about forecasts belonging to this job. + * Some statistics are omitted if no forecasts have been made. */ + forecasts_stats: MlJobForecastStatistics + /** Identifier for the anomaly detection job. */ + job_id: string + /** An object that provides information about the size and contents of the model. */ + model_size_stats: MlModelSizeStats + /** Contains properties for the node that runs the job. + * This information is available only for open jobs. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + node?: MlDiscoveryNodeCompact + /** For open jobs only, the elapsed time for which the job has been open. */ + open_time?: DateTime + /** The status of the anomaly detection job, which can be one of the following values: `closed`, `closing`, `failed`, `opened`, `opening`. */ + state: MlJobState + /** An object that provides statistical information about the timing aspects of this job. */ + timing_stats: MlJobTimingStats + /** Indicates that the process of deleting the job is in progress but not yet completed. It is only reported when `true`.
*/ + deleting?: boolean +} + +export interface MlJobTimingStats { + average_bucket_processing_time_ms?: DurationValue + bucket_count: long + exponential_average_bucket_processing_time_ms?: DurationValue + exponential_average_bucket_processing_time_per_hour_ms: DurationValue + job_id: Id + total_bucket_processing_time_ms: DurationValue + maximum_bucket_processing_time_ms?: DurationValue + minimum_bucket_processing_time_ms?: DurationValue +} + +export interface MlLearningToRankConfig { + default_params?: Record + feature_extractors?: Record[] + num_top_feature_importance_values: integer +} + +export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' + +export interface MlModelPackageConfig { + create_time?: EpochTime + description?: string + inference_config?: Record + metadata?: Metadata + minimum_version?: string + model_repository?: string + model_type?: string + packaged_model_id: Id + platform_architecture?: string + prefix_strings?: MlTrainedModelPrefixStrings + size?: ByteSize + sha256?: string + tags?: string[] + vocabulary_file?: string +} + +export interface MlModelPlotConfig { + /** If true, enables calculation and storage of the model change annotations for each entity that is being analyzed. */ + annotations_enabled?: boolean + /** If true, enables calculation and storage of the model bounds for each entity that is being analyzed. */ + enabled?: boolean + /** Limits data collection to this comma separated list of partition or by field values. If terms are not specified or it is an empty string, no filtering is applied. Wildcards are not supported. Only the specified terms can be viewed when using the Single Metric Viewer. */ + terms?: Field +} + +export interface MlModelSizeStats { + bucket_allocation_failures_count: long + job_id: Id + log_time: DateTime + memory_status: MlMemoryStatus + model_bytes: ByteSize + model_bytes_exceeded?: ByteSize + model_bytes_memory_limit?: ByteSize + output_memory_allocator_bytes?: ByteSize + peak_model_bytes?: ByteSize + assignment_memory_basis?: string + result_type: string + total_by_field_count: long + total_over_field_count: long + total_partition_field_count: long + categorization_status: MlCategorizationStatus + categorized_doc_count: integer + dead_category_count: integer + failed_category_count: integer + frequent_category_count: integer + rare_category_count: integer + total_category_count: integer + timestamp?: long +} + +export interface MlModelSnapshot { + /** An optional description of the job. */ + description?: string + /** A numerical character string that uniquely identifies the job that the snapshot was created for. */ + job_id: Id + /** The timestamp of the latest processed record. */ + latest_record_time_stamp?: integer + /** The timestamp of the latest bucket result. */ + latest_result_time_stamp?: integer + /** The minimum version required to be able to restore the model snapshot. */ + min_version: VersionString + /** Summary information describing the model. */ + model_size_stats?: MlModelSizeStats + /** If true, this snapshot will not be deleted during automatic cleanup of snapshots older than model_snapshot_retention_days. However, this snapshot will be deleted when the job is deleted. The default value is false. */ + retain: boolean + /** For internal use only. */ + snapshot_doc_count: long + /** A numerical character string that uniquely identifies the model snapshot. */ + snapshot_id: Id + /** The creation timestamp for the snapshot. 
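+ * @example + * // Sketch (assumed behavior based on the retain flag above): a snapshot with retain: true survives retention-based cleanup but is still deleted with its job.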
*/ + timestamp: long +} + +export interface MlModelSnapshotUpgrade { + job_id: Id + snapshot_id: Id + state: MlSnapshotUpgradeState + /** @remarks This property is not supported on Elastic Cloud Serverless. */ + node: MlDiscoveryNode + assignment_explanation: string +} + +export interface MlNerInferenceOptions { + /** The tokenization options */ + tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + /** The token classification labels. Must be IOB formatted tags */ + classification_labels?: string[] + vocabulary?: MlVocabulary +} + +export interface MlNerInferenceUpdateOptions { + /** The tokenization options to update when inferring */ + tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} + +export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig { +} + +export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig { + /** Should the tokenizer prefix input with a space character */ + add_prefix_space?: boolean +} + +export interface MlNlpTokenizationUpdateOptions { + /** Truncate options to apply */ + truncate?: MlTokenizationTruncate + /** Span options to apply */ + span?: integer +} + +export interface MlOutlierDetectionParameters { + /** Specifies whether the feature influence calculation is enabled. */ + compute_feature_influence?: boolean + /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. + * Value range: 0-1 */ + feature_influence_threshold?: double + /** The method that outlier detection uses. + * Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. + * The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ + method?: string + /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. + * When the value is not set, different values are used for different ensemble members. + * This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ + n_neighbors?: integer + /** The proportion of the data set that is assumed to be outlying prior to outlier detection. + * For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ + outlier_fraction?: double + /** If `true`, the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). */ + standardization_enabled?: boolean +} + +export interface MlOverallBucket { + /** The length of the bucket in seconds. Matches the job with the longest bucket_span value. */ + bucket_span: DurationValue + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ + is_interim: boolean + /** An array of objects that contain the max_anomaly_score per job_id. */ + jobs: MlOverallBucketJob[] + /** The top_n average of the maximum bucket anomaly_score per job. */ + overall_score: double + /** Internal. This is always set to overall_bucket. 
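+ * @example + * // Worked illustration (assumed numbers): with top_n = 2 and per-job maximum scores of 95, 80, and 20, overall_score = (95 + 80) / 2 = 87.5.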
*/ + result_type: string + /** The start time of the bucket for which these results were calculated. */ + timestamp: EpochTime + /** The start time of the bucket for which these results were calculated. */ + timestamp_string?: DateTime +} + +export interface MlOverallBucketJob { + job_id: Id + max_anomaly_score: double +} + +export interface MlPage { + /** Skips the specified number of items. */ + from?: integer + /** Specifies the maximum number of items to obtain. */ + size?: integer +} + +export interface MlPassThroughInferenceOptions { + /** The tokenization options */ + tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + vocabulary?: MlVocabulary +} + +export interface MlPassThroughInferenceUpdateOptions { + /** The tokenization options to update when inferring */ + tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} + +export interface MlPerPartitionCategorization { + /** To enable this setting, you must also set the `partition_field_name` property to the same value in every detector that uses the keyword `mlcategory`. Otherwise, job creation fails. */ + enabled?: boolean + /** This setting can be set to true only if per-partition categorization is enabled. If true, both categorization and subsequent anomaly detection stops for partitions where the categorization status changes to warn. This setting makes it viable to have a job where it is expected that categorization works well for some partitions but not others; you do not pay the cost of bad categorization forever in the partitions where it works badly. */ + stop_on_warn?: boolean +} + +export type MlPredictedValue = ScalarValue | ScalarValue[] + +export interface MlQueryFeatureExtractor { + default_score?: float + feature_name: string + query: QueryDslQueryContainer +} + +export interface MlQuestionAnsweringInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: integer + /** The tokenization options to update when inferring */ + tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + /** The maximum answer length to consider */ + max_answer_length?: integer +} + +export interface MlQuestionAnsweringInferenceUpdateOptions { + /** The question to answer given the inference context */ + question: string + /** Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: integer + /** The tokenization options to update when inferring */ + tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + /** The maximum answer length to consider for extraction */ + max_answer_length?: integer +} + +export interface MlRegressionInferenceOptions { + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: Field + /** Specifies the maximum number of feature importance values per document. 
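+ * @example + * // Illustrative sketch: return the two most influential features for each document. + * // { regression: { num_top_feature_importance_values: 2 } }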
*/ + num_top_feature_importance_values?: integer +} + +export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'stopping' + +export type MlRuleAction = 'skip_result' | 'skip_model_update' + +export interface MlRuleCondition { + /** Specifies the result property to which the condition applies. If your detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can only specify conditions that apply to time. */ + applies_to: MlAppliesTo + /** Specifies the condition operator. The available options are greater than, greater than or equals, less than, and less than or equals. */ + operator: MlConditionOperator + /** The value that is compared against the `applies_to` field using the operator. */ + value: double +} + +export interface MlRunningStateSearchInterval { + /** The end time. */ + end?: Duration + /** The end time as an epoch in milliseconds. */ + end_ms: DurationValue + /** The start time. */ + start?: Duration + /** The start time as an epoch in milliseconds. */ + start_ms: DurationValue +} + +export type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed' + +export interface MlTextClassificationInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: integer + /** The tokenization options */ + tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + /** Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels */ + classification_labels?: string[] + vocabulary?: MlVocabulary +} + +export interface MlTextClassificationInferenceUpdateOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ + num_top_classes?: integer + /** The tokenization options to update when inferring */ + tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + /** Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels */ + classification_labels?: string[] +} + +export interface MlTextEmbeddingInferenceOptions { + /** The number of dimensions in the embedding output */ + embedding_size?: integer + /** The tokenization options */ + tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + vocabulary?: MlVocabulary +} + +export interface MlTextEmbeddingInferenceUpdateOptions { + tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string +} + +export interface MlTextExpansionInferenceOptions { + /** The tokenization options */ + tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + vocabulary?: MlVocabulary +} + +export interface MlTextExpansionInferenceUpdateOptions { + tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value.
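+ * @example + * // Hypothetical field name (sketch only): route text expansion output into a custom field. + * // { text_expansion: { results_field: 'ml_tokens' } }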
*/ + results_field?: string +} + +export interface MlTimingStats { + /** Runtime of the analysis in milliseconds. */ + elapsed_time: DurationValue + /** Runtime of the latest iteration of the analysis in milliseconds. */ + iteration_time?: DurationValue +} + +export interface MlTokenizationConfigContainer { + /** Indicates BERT tokenization and its options */ + bert?: MlNlpBertTokenizationConfig + /** Indicates BERT Japanese tokenization and its options */ + bert_ja?: MlNlpBertTokenizationConfig + /** Indicates MPNET tokenization and its options */ + mpnet?: MlNlpBertTokenizationConfig + /** Indicates RoBERTa tokenization and its options */ + roberta?: MlNlpRobertaTokenizationConfig + xlm_roberta?: MlXlmRobertaTokenizationConfig +} + +export type MlTokenizationTruncate = 'first' | 'second' | 'none' + +export interface MlTopClassEntry { + class_name: string + class_probability: double + class_score: double +} + +export interface MlTotalFeatureImportance { + /** The feature for which this importance was calculated. */ + feature_name: Name + /** A collection of feature importance statistics related to the training data set for this particular feature. */ + importance: MlTotalFeatureImportanceStatistics[] + /** If the trained model is a classification model, feature importance statistics are gathered per target class value. */ + classes: MlTotalFeatureImportanceClass[] +} + +export interface MlTotalFeatureImportanceClass { + /** The target class value. Could be a string, boolean, or number. */ + class_name: Name + /** A collection of feature importance statistics related to the training data set for this particular feature. */ + importance: MlTotalFeatureImportanceStatistics[] +} + +export interface MlTotalFeatureImportanceStatistics { + /** The average magnitude of this feature across all the training data. This value is the average of the absolute values of the importance for this feature. */ + mean_magnitude: double + /** The maximum importance value across all the training data for this feature. */ + max: integer + /** The minimum importance value across all the training data for this feature. */ + min: integer +} + +export interface MlTrainedModelAssignment { + adaptive_allocations?: MlAdaptiveAllocationsSettings | null + /** The overall assignment state. */ + assignment_state: MlDeploymentAssignmentState + max_assigned_allocations?: integer + reason?: string + /** The allocation state for each node. */ + routing_table: Record<string, MlTrainedModelAssignmentRoutingTable> + /** The timestamp when the deployment started. */ + start_time: DateTime + task_parameters: MlTrainedModelAssignmentTaskParameters +} + +export interface MlTrainedModelAssignmentRoutingStateAndReason { + /** The reason for the current state. It is usually populated only when the + * `routing_state` is `failed`. */ + reason?: string + /** The current routing state. */ + routing_state: MlRoutingState +} + +export interface MlTrainedModelAssignmentRoutingTable { + /** The reason for the current state. It is usually populated only when the + * `routing_state` is `failed`. */ + reason?: string + /** The current routing state. */ + routing_state: MlRoutingState + /** Current number of allocations. */ + current_allocations: integer + /** Target number of allocations. */ + target_allocations: integer +} + +export interface MlTrainedModelAssignmentTaskParameters { + /** The size of the trained model in bytes. */ + model_bytes: ByteSize + /** The unique identifier for the trained model. */ + model_id: Id + /** The unique identifier for the trained model deployment.
*/ + deployment_id: Id + /** The size of the trained model cache. */ + cache_size?: ByteSize + /** The total number of allocations this model is assigned across ML nodes. */ + number_of_allocations: integer + priority: MlTrainingPriority + per_deployment_memory_bytes: ByteSize + per_allocation_memory_bytes: ByteSize + /** The number of inference requests allowed in the queue at a time. */ + queue_capacity: integer + /** Number of threads per allocation. */ + threads_per_allocation: integer +} + +export interface MlTrainedModelConfig { + /** Identifier for the trained model. */ + model_id: Id + /** The model type */ + model_type?: MlTrainedModelType + /** A comma-delimited string of tags. A trained model can have many tags, or none. */ + tags: string[] + /** The Elasticsearch version number in which the trained model was created. */ + version?: VersionString + compressed_definition?: string + /** Information on the creator of the trained model. */ + created_by?: string + /** The time when the trained model was created. */ + create_time?: DateTime + /** Any field map described in the inference configuration takes precedence. */ + default_field_map?: Record<string, string> + /** The free-text description of the trained model. */ + description?: string + /** The estimated heap usage in bytes to keep the trained model in memory. */ + estimated_heap_memory_usage_bytes?: integer + /** The estimated number of operations to use the trained model. */ + estimated_operations?: integer + /** True if the full model definition is present. */ + fully_defined?: boolean + /** The default configuration for inference. This can be either a regression, classification, or one of the many NLP focused configurations. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */ + inference_config?: MlInferenceConfigCreateContainer + /** The input field names for the model definition. */ + input: MlTrainedModelConfigInput + /** The license level of the trained model. */ + license_level?: string + /** An object containing metadata about the trained model. For example, models created by data frame analytics contain analysis_config and input objects. */ + metadata?: MlTrainedModelConfigMetadata + model_size_bytes?: ByteSize + model_package?: MlModelPackageConfig + location?: MlTrainedModelLocation + platform_architecture?: string + prefix_strings?: MlTrainedModelPrefixStrings +} + +export interface MlTrainedModelConfigInput { + /** An array of input field names for the model. */ + field_names: Field[] +} + +export interface MlTrainedModelConfigMetadata { + model_aliases?: string[] + /** An object that contains the baseline for feature importance values. For regression analysis, it is a single value. For classification analysis, there is a value for each class. */ + feature_importance_baseline?: Record + /** List of the available hyperparameters optimized during the fine_parameter_tuning phase as well as specified by the user. */ + hyperparameters?: MlHyperparameter[] + /** An array of the total feature importance for each feature used from the training data set. This array of objects is returned if data frame analytics trained the model and the request includes total_feature_importance in the include request parameter. */ + total_feature_importance?: MlTotalFeatureImportance[] +} + +export interface MlTrainedModelDeploymentAllocationStatus { + /** The current number of nodes where the model is allocated.
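+ * @example + * // Sketch (derived from the fields in this interface): the deployment reports state 'fully_allocated' once allocation_count equals target_allocation_count.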
*/ + allocation_count: integer + /** The detailed allocation state related to the nodes. */ + state: MlDeploymentAllocationState + /** The desired number of nodes for model allocation. */ + target_allocation_count: integer +} + +export interface MlTrainedModelDeploymentNodesStats { + /** The average time for each inference call to complete on this node. */ + average_inference_time_ms?: DurationValue + average_inference_time_ms_last_minute?: DurationValue + /** The average time for each inference call to complete on this node, excluding cache hits. */ + average_inference_time_ms_excluding_cache_hits?: DurationValue + /** The number of errors when evaluating the trained model. */ + error_count?: integer + /** The total number of inference calls made against this node for this model. */ + inference_count?: long + inference_cache_hit_count?: long + inference_cache_hit_count_last_minute?: long + /** The epoch time stamp of the last inference call for the model on this node. */ + last_access?: EpochTime + /** Information pertaining to the node. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + node?: MlDiscoveryNode + /** The number of allocations assigned to this node. */ + number_of_allocations?: integer + /** The number of inference requests queued to be processed. */ + number_of_pending_requests?: integer + peak_throughput_per_minute: long + /** The number of inference requests that were not processed because the queue was full. */ + rejected_execution_count?: integer + /** The current routing state and reason for the current routing state for this allocation. */ + routing_state: MlTrainedModelAssignmentRoutingStateAndReason + /** The epoch timestamp when the allocation started. */ + start_time?: EpochTime + /** The number of threads used by each allocation during inference. */ + threads_per_allocation?: integer + throughput_last_minute: integer + /** The number of inference requests that timed out before being processed. */ + timeout_count?: integer +} + +export interface MlTrainedModelDeploymentStats { + adaptive_allocations?: MlAdaptiveAllocationsSettings + /** The detailed allocation status for the deployment. */ + allocation_status?: MlTrainedModelDeploymentAllocationStatus + cache_size?: ByteSize + /** The unique identifier for the trained model deployment. */ + deployment_id: Id + /** The sum of `error_count` for all nodes in the deployment. */ + error_count?: integer + /** The sum of `inference_count` for all nodes in the deployment. */ + inference_count?: integer + /** The unique identifier for the trained model. */ + model_id: Id + /** The deployment stats for each node that currently has the model allocated. + * In serverless, stats are reported for a single unnamed virtual node. */ + nodes: MlTrainedModelDeploymentNodesStats[] + /** The number of allocations requested. */ + number_of_allocations?: integer + peak_throughput_per_minute: long + priority: MlTrainingPriority + /** The number of inference requests that can be queued before new requests are rejected. */ + queue_capacity?: integer + /** The sum of `rejected_execution_count` for all nodes in the deployment. + * Individual nodes reject an inference request if the inference queue is full. + * The queue size is controlled by the `queue_capacity` setting in the start + * trained model deployment API. */ + rejected_execution_count?: integer + /** The reason for the current deployment state. Usually only populated when + * the model is not deployed to a node.
*/ + reason?: string + /** The epoch timestamp when the deployment started. */ + start_time: EpochTime + /** The overall state of the deployment. */ + state?: MlDeploymentAssignmentState + /** The number of threads used by each allocation during inference. */ + threads_per_allocation?: integer + /** The sum of `timeout_count` for all nodes in the deployment. */ + timeout_count?: integer +} + +export interface MlTrainedModelEntities { + class_name: string + class_probability: double + entity: string + start_pos: integer + end_pos: integer +} + +export interface MlTrainedModelInferenceClassImportance { + class_name: string + importance: double +} + +export interface MlTrainedModelInferenceFeatureImportance { + feature_name: string + importance?: double + classes?: MlTrainedModelInferenceClassImportance[] +} + +export interface MlTrainedModelInferenceStats { + /** The number of times the model was loaded for inference and was not retrieved from the cache. + * If this number is close to the `inference_count`, the cache is not being appropriately used. + * This can be solved by increasing the cache size or its time-to-live (TTL). + * Refer to general machine learning settings for the appropriate settings. */ + cache_miss_count: integer + /** The number of failures when using the model for inference. */ + failure_count: integer + /** The total number of times the model has been called for inference. + * This is across all inference contexts, including all pipelines. */ + inference_count: integer + /** The number of inference calls where all the training features for the model were missing. */ + missing_all_fields_count: integer + /** The time when the statistics were last updated. */ + timestamp: EpochTime +} + +export interface MlTrainedModelLocation { + index: MlTrainedModelLocationIndex +} + +export interface MlTrainedModelLocationIndex { + name: IndexName +} + +export interface MlTrainedModelPrefixStrings { + /** String prepended to input at ingest */ + ingest?: string + /** String prepended to input at search */ + search?: string +} + +export interface MlTrainedModelSizeStats { + /** The size of the model in bytes. */ + model_size_bytes: ByteSize + /** The amount of memory required to load the model in bytes. */ + required_native_memory_bytes: ByteSize +} + +export interface MlTrainedModelStats { + /** A collection of deployment stats, which is present when the models are deployed. */ + deployment_stats?: MlTrainedModelDeploymentStats + /** A collection of inference stats fields. */ + inference_stats?: MlTrainedModelInferenceStats + /** A collection of ingest stats for the model across all nodes. + * The values are summations of the individual node statistics. + * The format matches the ingest section in the nodes stats API. */ + ingest?: Record + /** The unique identifier of the trained model. */ + model_id: Id + /** A collection of model size stats. */ + model_size_stats: MlTrainedModelSizeStats + /** The number of ingest pipelines that currently refer to the model. */ + pipeline_count: integer +} + +export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' + +export type MlTrainingPriority = 'normal' | 'low' + +export interface MlTransformAuthorization { + /** If an API key was used for the most recent update to the transform, its name and identifier are listed in the response. */ + api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the transform, its roles at the time of the update are listed in the response.
*/ + roles?: string[] + /** If a service account was used for the most recent update to the transform, the account name is listed in the response. */ + service_account?: string +} + +export interface MlValidationLoss { + /** Validation loss values for every added decision tree during the forest growing procedure. */ + fold_values: string[] + /** The type of the loss metric. For example, binomial_logistic. */ + loss_type: string +} + +export interface MlVocabulary { + index: IndexName +} + +export interface MlXlmRobertaTokenizationConfig extends MlCommonTokenizationConfig { +} + +export interface MlZeroShotClassificationInferenceOptions { + /** The tokenization options to update when inferring */ + tokenization?: MlTokenizationConfigContainer + /** Hypothesis template used when tokenizing labels for prediction */ + hypothesis_template?: string + /** The zero shot classification labels indicating entailment, neutral, and contradiction + * Must contain exactly and only entailment, neutral, and contradiction */ + classification_labels: string[] + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + /** Indicates if more than one true label exists. */ + multi_label?: boolean + /** The labels to predict. */ + labels?: string[] +} + +export interface MlZeroShotClassificationInferenceUpdateOptions { + /** The tokenization options to update when inferring */ + tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ + results_field?: string + /** Update the configured multi label option. Indicates if more than one true label exists. Defaults to the configured value. */ + multi_label?: boolean + /** The labels to predict. */ + labels: string[] +} + +export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase { + /** The unique identifier of the trained model. */ + model_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never } +} + +export interface MlClearTrainedModelDeploymentCacheResponse { + cleared: boolean +} + +export interface MlCloseJobRequest extends RequestBase { + /** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. */ + job_id: Id + /** Refer to the description for the `allow_no_match` query parameter. */ + allow_no_match?: boolean + /** Refer to the description for the `force` query parameter. */ + force?: boolean + /** Refer to the description for the `timeout` query parameter. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
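+ *
+ * A minimal close-job sketch (illustrative, not part of the generated spec), assuming a
+ * configured `@elastic/elasticsearch` Client instance `client` and a hypothetical job id:
+ *
+ *     const resp = await client.ml.closeJob({ job_id: 'my-job', timeout: '30s' })
+ *     console.log(resp.closed)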
*/ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, force?: never, timeout?: never } +} + +export interface MlCloseJobResponse { + closed: boolean +} + +export interface MlDeleteCalendarRequest extends RequestBase { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never } +} + +export type MlDeleteCalendarResponse = AcknowledgedResponseBase + +export interface MlDeleteCalendarEventRequest extends RequestBase { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** Identifier for the scheduled event. + * You can obtain this identifier by using the get calendar events API. */ + event_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, event_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, event_id?: never } +} + +export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase + +export interface MlDeleteCalendarJobRequest extends RequestBase { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a + * comma-separated list of jobs or groups. */ + job_id: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, job_id?: never } +} + +export interface MlDeleteCalendarJobResponse { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** A description of the calendar. */ + description?: string + /** A list of anomaly detection job identifiers or group names. */ + job_ids: Ids +} + +export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. */ + id: Id + /** If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. */ + force?: boolean + /** The time to wait for the job to be deleted. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, force?: never, timeout?: never } +} + +export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase + +export interface MlDeleteDatafeedRequest extends RequestBase { + /** A numerical character string that uniquely identifies the datafeed. This + * identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It must start and end with alphanumeric + * characters. */ + datafeed_id: Id + /** Use to forcefully delete a started datafeed; this method is quicker than + * stopping and deleting the datafeed. */ + force?: boolean + /** All values in `body` will be added to the request body. 
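+ *
+ * Illustrative usage sketch (hypothetical datafeed id), assuming a configured Client
+ * instance `client`:
+ *
+ *     const resp = await client.ml.deleteDatafeed({ datafeed_id: 'datafeed-my-job', force: true })
+ *     console.log(resp.acknowledged)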
*/ + body?: string | { [key: string]: any } & { datafeed_id?: never, force?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, force?: never } +} + +export type MlDeleteDatafeedResponse = AcknowledgedResponseBase + +export interface MlDeleteExpiredDataRequest extends RequestBase { + /** Identifier for an anomaly detection job. It can be a job identifier, a + * group name, or a wildcard expression. */ + job_id?: Id + /** The desired requests per second for the deletion processes. The default + * behavior is no throttling. */ + requests_per_second?: float + /** How long can the underlying delete processes run until they are canceled. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, requests_per_second?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, requests_per_second?: never, timeout?: never } +} + +export interface MlDeleteExpiredDataResponse { + deleted: boolean +} + +export interface MlDeleteFilterRequest extends RequestBase { + /** A string that uniquely identifies a filter. */ + filter_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never } +} + +export type MlDeleteFilterResponse = AcknowledgedResponseBase + +export interface MlDeleteForecastRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** A comma-separated list of forecast identifiers. If you do not specify + * this optional parameter or if you specify `_all` or `*` the API deletes + * all forecasts from the job. */ + forecast_id?: Id + /** Specifies whether an error occurs when there are no forecasts. In + * particular, if this parameter is set to `false` and there are no + * forecasts associated with the job, attempts to delete all forecasts + * return an error. */ + allow_no_forecasts?: boolean + /** Specifies the period of time to wait for the completion of the delete + * operation. When this period of time elapses, the API fails and returns an + * error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, forecast_id?: never, allow_no_forecasts?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, forecast_id?: never, allow_no_forecasts?: never, timeout?: never } +} + +export type MlDeleteForecastResponse = AcknowledgedResponseBase + +export interface MlDeleteJobRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** Use to forcefully delete an opened job; this method is quicker than + * closing and deleting the job. */ + force?: boolean + /** Specifies whether annotations that have been added by the + * user should be deleted along with any auto-generated annotations when the job is + * reset. */ + delete_user_annotations?: boolean + /** Specifies whether the request should return immediately or wait until the + * job deletion completes. 
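+ *
+ * Illustrative usage sketch (hypothetical job id), assuming a configured Client instance
+ * `client`; with `wait_for_completion: false` the deletion proceeds in the background:
+ *
+ *     const resp = await client.ml.deleteJob({ job_id: 'my-job', wait_for_completion: false })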
*/ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, force?: never, delete_user_annotations?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, force?: never, delete_user_annotations?: never, wait_for_completion?: never } +} + +export type MlDeleteJobResponse = AcknowledgedResponseBase + +export interface MlDeleteModelSnapshotRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** Identifier for the model snapshot. */ + snapshot_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never } +} + +export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase + +export interface MlDeleteTrainedModelRequest extends RequestBase { + /** The unique identifier of the trained model. */ + model_id: Id + /** Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. */ + force?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, force?: never, timeout?: never } +} + +export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase + +export interface MlDeleteTrainedModelAliasRequest extends RequestBase { + /** The model alias to delete. */ + model_alias: Name + /** The trained model ID to which the model alias refers. */ + model_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_alias?: never, model_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_alias?: never, model_id?: never } +} + +export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase + +export interface MlEstimateModelMemoryRequest extends RequestBase { + /** For a list of the properties that you can specify in the + * `analysis_config` component of the body of this API, refer to the create + * anomaly detection jobs API. */ + analysis_config?: MlAnalysisConfig + /** Estimates of the highest cardinality in a single bucket that is observed + * for influencer fields over the time period that the job analyzes data. + * To produce a good answer, values must be provided for all influencer + * fields. Providing values for fields that are not listed as `influencers` + * has no effect on the estimation. */ + max_bucket_cardinality?: Record<Field, long> + /** Estimates of the cardinality that is observed for fields over the whole + * time period that the job analyzes data. To produce a good answer, values + * must be provided for fields referenced in the `by_field_name`, + * `over_field_name` and `partition_field_name` of any detectors. Providing + * values for other fields has no effect on the estimation.
It can be + * omitted from the request if no detectors have a `by_field_name`, + * `over_field_name` or `partition_field_name`. */ + overall_cardinality?: Record<Field, long> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { analysis_config?: never, max_bucket_cardinality?: never, overall_cardinality?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { analysis_config?: never, max_bucket_cardinality?: never, overall_cardinality?: never } +} + +export interface MlEstimateModelMemoryResponse { + model_memory_estimate: string +} + +export interface MlEvaluateDataFrameConfusionMatrixItem { + actual_class: Name + actual_class_doc_count: integer + predicted_classes: MlEvaluateDataFrameConfusionMatrixPrediction[] + other_predicted_class_doc_count: integer +} + +export interface MlEvaluateDataFrameConfusionMatrixPrediction { + predicted_class: Name + count: integer +} + +export interface MlEvaluateDataFrameConfusionMatrixThreshold { + /** True Positive */ + tp: integer + /** False Positive */ + fp: integer + /** True Negative */ + tn: integer + /** False Negative */ + fn: integer +} + +export interface MlEvaluateDataFrameDataframeClassificationSummary { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. + * It is calculated for a specific class (provided as "class_name") treated as positive. */ + auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + /** Accuracy of predictions (per-class and overall). */ + accuracy?: MlEvaluateDataFrameDataframeClassificationSummaryAccuracy + /** Multiclass confusion matrix. */ + multiclass_confusion_matrix?: MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix + /** Precision of predictions (per-class and average). */ + precision?: MlEvaluateDataFrameDataframeClassificationSummaryPrecision + /** Recall of predictions (per-class and average). */ + recall?: MlEvaluateDataFrameDataframeClassificationSummaryRecall +} + +export interface MlEvaluateDataFrameDataframeClassificationSummaryAccuracy { + classes: MlEvaluateDataFrameDataframeEvaluationClass[] + overall_accuracy: double +} + +export interface MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix { + confusion_matrix: MlEvaluateDataFrameConfusionMatrixItem[] + other_actual_class_count: integer +} + +export interface MlEvaluateDataFrameDataframeClassificationSummaryPrecision { + classes: MlEvaluateDataFrameDataframeEvaluationClass[] + avg_precision: double +} + +export interface MlEvaluateDataFrameDataframeClassificationSummaryRecall { + classes: MlEvaluateDataFrameDataframeEvaluationClass[] + avg_recall: double +} + +export interface MlEvaluateDataFrameDataframeEvaluationClass extends MlEvaluateDataFrameDataframeEvaluationValue { + class_name: Name +} + +export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc extends MlEvaluateDataFrameDataframeEvaluationValue { + curve?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem[] +} + +export interface MlEvaluateDataFrameDataframeEvaluationSummaryAucRocCurveItem { + tpr: double + fpr: double + threshold: double +} + +export interface MlEvaluateDataFrameDataframeEvaluationValue { + value: double +} + +export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve.
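+ *
+ * Illustrative sketch (hypothetical index and field names), assuming a configured Client
+ * instance `client`:
+ *
+ *     const resp = await client.ml.evaluateDataFrame({
+ *       index: 'my-outlier-results',
+ *       evaluation: { outlier_detection: { actual_field: 'is_outlier', predicted_probability_field: 'ml.outlier_score' } }
+ *     })
+ *     console.log(resp.outlier_detection?.auc_roc?.value)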
*/ + auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + /** Set the different thresholds of the outlier score at which the metric is calculated. */ + precision?: Record<string, double> + /** Set the different thresholds of the outlier score at which the metric is calculated. */ + recall?: Record<string, double> + /** Set the different thresholds of the outlier score at which the metrics (`tp` - true positive, `fp` - false positive, `tn` - true negative, `fn` - false negative) are calculated. */ + confusion_matrix?: Record<string, MlEvaluateDataFrameConfusionMatrixThreshold> +} + +export interface MlEvaluateDataFrameDataframeRegressionSummary { + /** Pseudo Huber loss function. */ + huber?: MlEvaluateDataFrameDataframeEvaluationValue + /** Average squared difference between the predicted values and the actual (`ground truth`) value. */ + mse?: MlEvaluateDataFrameDataframeEvaluationValue + /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (`ground truth`) value. */ + msle?: MlEvaluateDataFrameDataframeEvaluationValue + /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */ + r_squared?: MlEvaluateDataFrameDataframeEvaluationValue +} + +export interface MlEvaluateDataFrameRequest extends RequestBase { + /** Defines the type of evaluation you want to perform. */ + evaluation: MlDataframeEvaluationContainer + /** Defines the `index` in which the evaluation will be performed. */ + index: IndexName + /** A query clause that retrieves a subset of data from the source index. */ + query?: QueryDslQueryContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { evaluation?: never, index?: never, query?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { evaluation?: never, index?: never, query?: never } +} + +export type MlEvaluateDataFrameResponse = MlEvaluateDataFrameResponseBody + +export interface MlEvaluateDataFrameResponseBody { + /** Evaluation results for a classification analysis. + * It outputs a prediction that identifies to which of the classes each document belongs. */ + classification?: MlEvaluateDataFrameDataframeClassificationSummary + /** Evaluation results for an outlier detection analysis. + * It outputs the probability that each document is an outlier. */ + outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary + /** Evaluation results for a regression analysis which outputs a prediction of values. */ + regression?: MlEvaluateDataFrameDataframeRegressionSummary +} + +export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ + id?: Id + /** The configuration of how to source the analysis data. It requires an + * index. Optionally, query and _source may be specified. */ + source?: MlDataframeAnalyticsSource + /** The destination configuration, consisting of index and optionally + * results_field (ml by default). */ + dest?: MlDataframeAnalyticsDestination + /** The analysis configuration, which contains the information necessary to + * perform one of the following types of analysis: classification, outlier + * detection, or regression. */ + analysis?: MlDataframeAnalysisContainer + /** A description of the job.
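+ *
+ * Illustrative sketch (hypothetical source index), assuming a configured Client
+ * instance `client`:
+ *
+ *     const resp = await client.ml.explainDataFrameAnalytics({
+ *       source: { index: 'my-source-index' },
+ *       analysis: { outlier_detection: {} }
+ *     })
+ *     console.log(resp.memory_estimation, resp.field_selection)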
*/ + description?: string + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to + * create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ + model_memory_limit?: string + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ + max_num_threads?: integer + /** Specify includes and/or excludes patterns to select which fields will be + * included in the analysis. The patterns specified in excludes are applied + * last, therefore excludes takes precedence. In other words, if the same + * field is specified in both includes and excludes, then the field will not + * be included in the analysis. */ + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. */ + allow_lazy_start?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, source?: never, dest?: never, analysis?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, analyzed_fields?: never, allow_lazy_start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, source?: never, dest?: never, analysis?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, analyzed_fields?: never, allow_lazy_start?: never } +} + +export interface MlExplainDataFrameAnalyticsResponse { + /** An array of objects that explain selection for each field, sorted by the field names. */ + field_selection: MlDataframeAnalyticsFieldSelection[] + /** An object containing the memory estimates. */ + memory_estimation: MlDataframeAnalyticsMemoryEstimation +} + +export interface MlFlushJobRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** Refer to the description for the `advance_time` query parameter. */ + advance_time?: DateTime + /** Refer to the description for the `calc_interim` query parameter. */ + calc_interim?: boolean + /** Refer to the description for the `end` query parameter. */ + end?: DateTime + /** Refer to the description for the `skip_time` query parameter. */ + skip_time?: DateTime + /** Refer to the description for the `start` query parameter. */ + start?: DateTime + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, advance_time?: never, calc_interim?: never, end?: never, skip_time?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, advance_time?: never, calc_interim?: never, end?: never, skip_time?: never, start?: never } +} + +export interface MlFlushJobResponse { + flushed: boolean + /** Provides the timestamp (in milliseconds since the epoch) of the end of + * the last bucket that was processed.
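+ *
+ * Illustrative usage sketch (hypothetical job id), assuming a configured Client
+ * instance `client`:
+ *
+ *     const resp = await client.ml.flushJob({ job_id: 'my-job', calc_interim: true })
+ *     console.log(resp.flushed, resp.last_finalized_bucket_end)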
*/ + last_finalized_bucket_end?: integer +} + +export interface MlForecastRequest extends RequestBase { + /** Identifier for the anomaly detection job. The job must be open when you + * create a forecast; otherwise, an error occurs. */ + job_id: Id + /** Refer to the description for the `duration` query parameter. */ + duration?: Duration + /** Refer to the description for the `expires_in` query parameter. */ + expires_in?: Duration + /** Refer to the description for the `max_model_memory` query parameter. */ + max_model_memory?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, duration?: never, expires_in?: never, max_model_memory?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, duration?: never, expires_in?: never, max_model_memory?: never } +} + +export interface MlForecastResponse { + acknowledged: boolean + forecast_id: Id +} + +export interface MlGetBucketsRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** The timestamp of a single bucket result. If you do not specify this + * parameter, the API returns information about all buckets. */ + timestamp?: DateTime + /** Skips the specified number of buckets. */ + from?: integer + /** Specifies the maximum number of buckets to obtain. */ + size?: integer + /** Refer to the description for the `anomaly_score` query parameter. */ + anomaly_score?: double + /** Refer to the description for the `desc` query parameter. */ + desc?: boolean + /** Refer to the description for the `end` query parameter. */ + end?: DateTime + /** Refer to the description for the `exclude_interim` query parameter. */ + exclude_interim?: boolean + /** Refer to the description for the `expand` query parameter. */ + expand?: boolean + page?: MlPage + /** Refer to the description for the `sort` query parameter. */ + sort?: Field + /** Refer to the description for the `start` query parameter. */ + start?: DateTime + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, timestamp?: never, from?: never, size?: never, anomaly_score?: never, desc?: never, end?: never, exclude_interim?: never, expand?: never, page?: never, sort?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, timestamp?: never, from?: never, size?: never, anomaly_score?: never, desc?: never, end?: never, exclude_interim?: never, expand?: never, page?: never, sort?: never, start?: never } +} + +export interface MlGetBucketsResponse { + buckets: MlBucketSummary[] + count: long +} + +export interface MlGetCalendarEventsRequest extends RequestBase { + /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ + calendar_id: Id + /** Specifies to get events with timestamps earlier than this time. */ + end?: DateTime + /** Skips the specified number of events. */ + from?: integer + /** Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. */ + job_id?: Id + /** Specifies the maximum number of events to obtain.
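+ *
+ * Illustrative usage sketch (hypothetical calendar id), assuming a configured Client
+ * instance `client`:
+ *
+ *     const resp = await client.ml.getCalendarEvents({ calendar_id: 'my-calendar', size: 50 })
+ *     console.log(resp.count, resp.events)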
*/ + size?: integer + /** Specifies to get events with timestamps after this time. */ + start?: DateTime + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, end?: never, from?: never, job_id?: never, size?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, end?: never, from?: never, job_id?: never, size?: never, start?: never } +} + +export interface MlGetCalendarEventsResponse { + count: long + events: MlCalendarEvent[] +} + +export interface MlGetCalendarsCalendar { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** A description of the calendar. */ + description?: string + /** An array of anomaly detection job identifiers. */ + job_ids: Id[] +} + +export interface MlGetCalendarsRequest extends RequestBase { + /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ + calendar_id?: Id + /** Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. */ + from?: integer + /** Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. */ + size?: integer + /** This object is supported only when you omit the calendar identifier. */ + page?: MlPage + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, from?: never, size?: never, page?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, from?: never, size?: never, page?: never } +} + +export interface MlGetCalendarsResponse { + calendars: MlGetCalendarsCalendar[] + count: long +} + +export interface MlGetCategoriesRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** Identifier for the category, which is unique in the job. If you specify + * neither the category ID nor the partition_field_value, the API returns + * information about all categories. If you specify only the + * partition_field_value, it returns information about all categories for + * the specified partition. */ + category_id?: CategoryId + /** Skips the specified number of categories. */ + from?: integer + /** Only return categories for the specified partition. */ + partition_field_value?: string + /** Specifies the maximum number of categories to obtain. */ + size?: integer + /** Configures pagination. + * This parameter has the `from` and `size` properties. */ + page?: MlPage + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, category_id?: never, from?: never, partition_field_value?: never, size?: never, page?: never } + /** All values in `querystring` will be added to the request querystring. 
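+ *
+ * Illustrative usage sketch (hypothetical job id), assuming a configured Client
+ * instance `client`:
+ *
+ *     const resp = await client.ml.getCategories({ job_id: 'my-job', size: 10 })
+ *     for (const category of resp.categories) console.log(category)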
*/ + querystring?: { [key: string]: any } & { job_id?: never, category_id?: never, from?: never, partition_field_value?: never, size?: never, page?: never } +} + +export interface MlGetCategoriesResponse { + categories: MlCategory[] + count: long +} + +export interface MlGetDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. If you do not specify this + * option, the API returns information for the first hundred data frame + * analytics jobs. */ + id?: Id + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value returns an empty data_frame_analytics array when there + * are no matches and the subset of results when there are partial matches. + * If this parameter is `false`, the request returns a 404 status code when + * there are no matches or only partial matches. */ + allow_no_match?: boolean + /** Skips the specified number of data frame analytics jobs. */ + from?: integer + /** Specifies the maximum number of data frame analytics jobs to obtain. */ + size?: integer + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ + exclude_generated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } +} + +export interface MlGetDataFrameAnalyticsResponse { + count: integer + /** An array of data frame analytics job resources, which are sorted by the id value in ascending order. */ + data_frame_analytics: MlDataframeAnalyticsSummary[] +} + +export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { + /** Identifier for the data frame analytics job. If you do not specify this + * option, the API returns information for the first hundred data frame + * analytics jobs. */ + id?: Id + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value returns an empty data_frame_analytics array when there + * are no matches and the subset of results when there are partial matches. + * If this parameter is `false`, the request returns a 404 status code when + * there are no matches or only partial matches. */ + allow_no_match?: boolean + /** Skips the specified number of data frame analytics jobs. */ + from?: integer + /** Specifies the maximum number of data frame analytics jobs to obtain. */ + size?: integer + /** Defines whether the stats response should be verbose. */ + verbose?: boolean + /** All values in `body` will be added to the request body. 
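+ *
+ * Illustrative usage sketch (hypothetical job id), assuming a configured Client
+ * instance `client`:
+ *
+ *     const resp = await client.ml.getDataFrameAnalyticsStats({ id: 'my-analytics-job', verbose: true })
+ *     console.log(resp.count, resp.data_frame_analytics)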
*/ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, verbose?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, verbose?: never } +} + +export interface MlGetDataFrameAnalyticsStatsResponse { + count: long + /** An array of objects that contain usage information for data frame analytics jobs, which are sorted by the id value in ascending order. */ + data_frame_analytics: MlDataframeAnalytics[] +} + +export interface MlGetDatafeedStatsRequest extends RequestBase { + /** Identifier for the datafeed. It can be a datafeed identifier or a + * wildcard expression. If you do not specify one of these options, the API + * returns information about all datafeeds. */ + datafeed_id?: Ids + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no datafeeds that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is `true`, which returns an empty `datafeeds` array + * when there are no matches and the subset of results when there are + * partial matches. If this parameter is `false`, the request returns a + * `404` status code when there are no matches or only partial matches. */ + allow_no_match?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never } +} + +export interface MlGetDatafeedStatsResponse { + count: long + datafeeds: MlDatafeedStats[] +} + +export interface MlGetDatafeedsRequest extends RequestBase { + /** Identifier for the datafeed. It can be a datafeed identifier or a + * wildcard expression. If you do not specify one of these options, the API + * returns information about all datafeeds. */ + datafeed_id?: Ids + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no datafeeds that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is `true`, which returns an empty `datafeeds` array + * when there are no matches and the subset of results when there are + * partial matches. If this parameter is `false`, the request returns a + * `404` status code when there are no matches or only partial matches. */ + allow_no_match?: boolean + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ + exclude_generated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, exclude_generated?: never } + /** All values in `querystring` will be added to the request querystring. 
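+ *
+ * Illustrative usage sketch (hypothetical wildcard id), assuming a configured Client
+ * instance `client`:
+ *
+ *     const resp = await client.ml.getDatafeeds({ datafeed_id: 'datafeed-*', exclude_generated: true })
+ *     console.log(resp.count)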
*/ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, exclude_generated?: never } +} + +export interface MlGetDatafeedsResponse { + count: long + datafeeds: MlDatafeed[] +} + +export interface MlGetFiltersRequest extends RequestBase { + /** A string that uniquely identifies a filter. */ + filter_id?: Ids + /** Skips the specified number of filters. */ + from?: integer + /** Specifies the maximum number of filters to obtain. */ + size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never, from?: never, size?: never } +} + +export interface MlGetFiltersResponse { + count: long + filters: MlFilter[] +} + +export interface MlGetInfluencersRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** If true, the results are sorted in descending order. */ + desc?: boolean + /** Returns influencers with timestamps earlier than this time. + * The default value means it is unset and results are not limited to + * specific timestamps. */ + end?: DateTime + /** If true, the output excludes interim results. By default, interim results + * are included. */ + exclude_interim?: boolean + /** Returns influencers with anomaly scores greater than or equal to this + * value. */ + influencer_score?: double + /** Skips the specified number of influencers. */ + from?: integer + /** Specifies the maximum number of influencers to obtain. */ + size?: integer + /** Specifies the sort field for the requested influencers. By default, the + * influencers are sorted by the `influencer_score` value. */ + sort?: Field + /** Returns influencers with timestamps after this time. The default value + * means it is unset and results are not limited to specific timestamps. */ + start?: DateTime + /** Configures pagination. + * This parameter has the `from` and `size` properties. */ + page?: MlPage + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, desc?: never, end?: never, exclude_interim?: never, influencer_score?: never, from?: never, size?: never, sort?: never, start?: never, page?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, desc?: never, end?: never, exclude_interim?: never, influencer_score?: never, from?: never, size?: never, sort?: never, start?: never, page?: never } +} + +export interface MlGetInfluencersResponse { + count: long + /** Array of influencer objects */ + influencers: MlInfluencer[] +} + +export interface MlGetJobStatsRequest extends RequestBase { + /** Identifier for the anomaly detection job. It can be a job identifier, a + * group name, a comma-separated list of jobs, or a wildcard expression. If + * you do not specify one of these options, the API returns information for + * all anomaly detection jobs. */ + job_id?: Id + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. 
+ * + * If `true`, the API returns an empty `jobs` array when + * there are no matches and the subset of results when there are partial + * matches. If `false`, the API returns a `404` status + * code when there are no matches or only partial matches. */ + allow_no_match?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never } +} + +export interface MlGetJobStatsResponse { + count: long + jobs: MlJobStats[] +} + +export interface MlGetJobsRequest extends RequestBase { + /** Identifier for the anomaly detection job. It can be a job identifier, a + * group name, or a wildcard expression. If you do not specify one of these + * options, the API returns information for all anomaly detection jobs. */ + job_id?: Ids + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is `true`, which returns an empty `jobs` array when + * there are no matches and the subset of results when there are partial + * matches. If this parameter is `false`, the request returns a `404` status + * code when there are no matches or only partial matches. */ + allow_no_match?: boolean + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ + exclude_generated?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, exclude_generated?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, exclude_generated?: never } +} + +export interface MlGetJobsResponse { + count: long + jobs: MlJob[] +} + +export interface MlGetMemoryStatsJvmStats { + /** Maximum amount of memory available for use by the heap. */ + heap_max?: ByteSize + /** Maximum amount of memory, in bytes, available for use by the heap. */ + heap_max_in_bytes: integer + /** Amount of Java heap currently being used for caching inference models. */ + java_inference?: ByteSize + /** Amount of Java heap, in bytes, currently being used for caching inference models. */ + java_inference_in_bytes: integer + /** Maximum amount of Java heap to be used for caching inference models. */ + java_inference_max?: ByteSize + /** Maximum amount of Java heap, in bytes, to be used for caching inference models. */ + java_inference_max_in_bytes: integer +} + +export interface MlGetMemoryStatsMemMlStats { + /** Amount of native memory set aside for anomaly detection jobs. */ + anomaly_detectors?: ByteSize + /** Amount of native memory, in bytes, set aside for anomaly detection jobs. */ + anomaly_detectors_in_bytes: integer + /** Amount of native memory set aside for data frame analytics jobs. */ + data_frame_analytics?: ByteSize + /** Amount of native memory, in bytes, set aside for data frame analytics jobs. 
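+ *
+ * Illustrative sketch, assuming a configured Client instance `client`; the `ml:true`
+ * node filter is taken from the request description above:
+ *
+ *     const resp = await client.ml.getMemoryStats({ node_id: 'ml:true' })
+ *     for (const [id, node] of Object.entries(resp.nodes)) console.log(id, node.mem.ml)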
*/ + data_frame_analytics_in_bytes: integer + /** Maximum amount of native memory (separate to the JVM heap) that may be used by machine learning native processes. */ + max?: ByteSize + /** Maximum amount of native memory (separate to the JVM heap), in bytes, that may be used by machine learning native processes. */ + max_in_bytes: integer + /** Amount of native memory set aside for loading machine learning native code shared libraries. */ + native_code_overhead?: ByteSize + /** Amount of native memory, in bytes, set aside for loading machine learning native code shared libraries. */ + native_code_overhead_in_bytes: integer + /** Amount of native memory set aside for trained models that have a PyTorch model_type. */ + native_inference?: ByteSize + /** Amount of native memory, in bytes, set aside for trained models that have a PyTorch model_type. */ + native_inference_in_bytes: integer +} + +export interface MlGetMemoryStatsMemStats { + /** If the amount of physical memory has been overridden using the es.total_memory_bytes system property + * then this reports the overridden value. Otherwise it reports the same value as total. */ + adjusted_total?: ByteSize + /** If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property + * then this reports the overridden value in bytes. Otherwise it reports the same value as `total_in_bytes`. */ + adjusted_total_in_bytes: integer + /** Total amount of physical memory. */ + total?: ByteSize + /** Total amount of physical memory in bytes. */ + total_in_bytes: integer + /** Contains statistics about machine learning use of native memory on the node. */ + ml: MlGetMemoryStatsMemMlStats +} + +export interface MlGetMemoryStatsMemory { + attributes: Record<string, string> + /** Contains Java Virtual Machine (JVM) statistics for the node. */ + jvm: MlGetMemoryStatsJvmStats + /** Contains statistics about memory usage for the node. */ + mem: MlGetMemoryStatsMemStats + /** Human-readable identifier for the node. Based on the node name setting. */ + name: Name + /** Roles assigned to the node. */ + roles: string[] + /** The host and port where transport HTTP connections are accepted. */ + transport_address: TransportAddress + ephemeral_id: Id +} + +export interface MlGetMemoryStatsRequest extends RequestBase { + /** The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or + * `ml:true` */ + node_id?: Id + /** Period to wait for a connection to the master node. If no response is received before the timeout + * expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request + * fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } +} + +export interface MlGetMemoryStatsResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record<Id, MlGetMemoryStatsMemory> +} + +export interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** A numerical character string that uniquely identifies the model snapshot.
You can get information for multiple + * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, + * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ + snapshot_id: Id + /** Specifies what to do when the request: + * + * - Contains wildcard expressions and there are no jobs that match. + * - Contains the _all string or no identifiers and there are no matches. + * - Contains wildcard expressions and there are only partial matches. + * + * The default value is true, which returns an empty jobs array when there are no matches and the subset of results + * when there are partial matches. If this parameter is false, the request returns a 404 status code when there are + * no matches or only partial matches. */ + allow_no_match?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, allow_no_match?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, allow_no_match?: never } +} + +export interface MlGetModelSnapshotUpgradeStatsResponse { + count: long + model_snapshot_upgrades: MlModelSnapshotUpgrade[] +} + +export interface MlGetModelSnapshotsRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple + * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, + * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ + snapshot_id?: Id + /** Skips the specified number of snapshots. */ + from?: integer + /** Specifies the maximum number of snapshots to obtain. */ + size?: integer + /** Refer to the description for the `desc` query parameter. */ + desc?: boolean + /** Refer to the description for the `end` query parameter. */ + end?: DateTime + page?: MlPage + /** Refer to the description for the `sort` query parameter. */ + sort?: Field + /** Refer to the description for the `start` query parameter. */ + start?: DateTime + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, from?: never, size?: never, desc?: never, end?: never, page?: never, sort?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, from?: never, size?: never, desc?: never, end?: never, page?: never, sort?: never, start?: never } +} + +export interface MlGetModelSnapshotsResponse { + count: long + model_snapshots: MlModelSnapshot[] +} + +export interface MlGetOverallBucketsRequest extends RequestBase { + /** Identifier for the anomaly detection job. It can be a job identifier, a + * group name, a comma-separated list of jobs or groups, or a wildcard + * expression. + * + * You can summarize the bucket results for all anomaly detection jobs by + * using `_all` or by specifying `*` as the `<job_id>`. */ + job_id: Id + /** Refer to the description for the `allow_no_match` query parameter. */ + allow_no_match?: boolean + /** Refer to the description for the `bucket_span` query parameter. */ + bucket_span?: Duration + /** Refer to the description for the `end` query parameter.
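+ *
+ * Illustrative usage sketch, assuming a configured Client instance `client`;
+ * `*` summarizes all jobs, per the `job_id` description above:
+ *
+ *     const resp = await client.ml.getOverallBuckets({ job_id: '*', overall_score: 80, top_n: 2 })
+ *     console.log(resp.count, resp.overall_buckets)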
*/ + end?: DateTime + /** Refer to the description for the `exclude_interim` query parameter. */ + exclude_interim?: boolean + /** Refer to the description for the `overall_score` query parameter. */ + overall_score?: double + /** Refer to the description for the `start` query parameter. */ + start?: DateTime + /** Refer to the description for the `top_n` query parameter. */ + top_n?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, bucket_span?: never, end?: never, exclude_interim?: never, overall_score?: never, start?: never, top_n?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_match?: never, bucket_span?: never, end?: never, exclude_interim?: never, overall_score?: never, start?: never, top_n?: never } +} + +export interface MlGetOverallBucketsResponse { + count: long + /** Array of overall bucket objects */ + overall_buckets: MlOverallBucket[] +} + +export interface MlGetRecordsRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** Skips the specified number of records. */ + from?: integer + /** Specifies the maximum number of records to obtain. */ + size?: integer + /** Refer to the description for the `desc` query parameter. */ + desc?: boolean + /** Refer to the description for the `end` query parameter. */ + end?: DateTime + /** Refer to the description for the `exclude_interim` query parameter. */ + exclude_interim?: boolean + page?: MlPage + /** Refer to the description for the `record_score` query parameter. */ + record_score?: double + /** Refer to the description for the `sort` query parameter. */ + sort?: Field + /** Refer to the description for the `start` query parameter. */ + start?: DateTime + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, from?: never, size?: never, desc?: never, end?: never, exclude_interim?: never, page?: never, record_score?: never, sort?: never, start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, from?: never, size?: never, desc?: never, end?: never, exclude_interim?: never, page?: never, record_score?: never, sort?: never, start?: never } +} + +export interface MlGetRecordsResponse { + count: long + records: MlAnomaly[] +} + +export interface MlGetTrainedModelsRequest extends RequestBase { + /** The unique identifier of the trained model or a model alias. + * + * You can get information for multiple trained models in a single API + * request by using a comma-separated list of model IDs or a wildcard + * expression. */ + model_id?: Ids + /** Specifies what to do when the request: + * + * - Contains wildcard expressions and there are no models that match. + * - Contains the _all string or no identifiers and there are no matches. + * - Contains wildcard expressions and there are only partial matches. + * + * If true, it returns an empty array when there are no matches and the + * subset of results when there are partial matches. */ + allow_no_match?: boolean + /** Specifies whether the included model definition should be returned as a + * JSON map (true) or in a custom compressed format (false). 
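+ *
+ * Illustrative usage sketch (hypothetical model id), assuming a configured Client
+ * instance `client`:
+ *
+ *     const resp = await client.ml.getTrainedModels({ model_id: 'my-model', exclude_generated: true })
+ *     console.log(resp.count, resp.trained_model_configs)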
*/ + decompress_definition?: boolean + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ + exclude_generated?: boolean + /** Skips the specified number of models. */ + from?: integer + /** A comma delimited string of optional fields to include in the response + * body. */ + include?: MlInclude + /** Specifies the maximum number of models to obtain. */ + size?: integer + /** A comma delimited string of tags. A trained model can have many tags, or + * none. When supplied, only trained models that contain all the supplied + * tags are returned. */ + tags?: string | string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, size?: never, tags?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, size?: never, tags?: never } +} + +export interface MlGetTrainedModelsResponse { + count: integer + /** An array of trained model resources, which are sorted by the model_id value in ascending order. */ + trained_model_configs: MlTrainedModelConfig[] +} + +export interface MlGetTrainedModelsStatsRequest extends RequestBase { + /** The unique identifier of the trained model or a model alias. It can be a + * comma-separated list or a wildcard expression. */ + model_id?: Ids + /** Specifies what to do when the request: + * + * - Contains wildcard expressions and there are no models that match. + * - Contains the _all string or no identifiers and there are no matches. + * - Contains wildcard expressions and there are only partial matches. + * + * If true, it returns an empty array when there are no matches and the + * subset of results when there are partial matches. */ + allow_no_match?: boolean + /** Skips the specified number of models. */ + from?: integer + /** Specifies the maximum number of models to obtain. */ + size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, from?: never, size?: never } +} + +export interface MlGetTrainedModelsStatsResponse { + /** The total number of trained model statistics that matched the requested ID patterns. Could be higher than the number of items in the trained_model_stats array as the size of the array is restricted by the supplied size parameter. */ + count: integer + /** An array of trained model statistics, which are sorted by the model_id value in ascending order. */ + trained_model_stats: MlTrainedModelStats[] +} + +export interface MlInferTrainedModelRequest extends RequestBase { + /** The unique identifier of the trained model. */ + model_id: Id + /** Controls the amount of time to wait for inference results. */ + timeout?: Duration + /** An array of objects to pass to the model for inference. The objects should contain a field matching your + * configured trained model input.
Typically, for NLP models, the field name is `text_field`. + * Currently, for NLP models, only a single value is allowed. */ + docs: Record<string, any>[] + /** The inference configuration updates to apply on the API call */ + inference_config?: MlInferenceConfigUpdateContainer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, timeout?: never, docs?: never, inference_config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, timeout?: never, docs?: never, inference_config?: never } +} + +export interface MlInferTrainedModelResponse { + inference_results: MlInferenceResponseResult[] +} + +export interface MlInfoAnomalyDetectors { + categorization_analyzer: MlCategorizationAnalyzer + categorization_examples_limit: integer + model_memory_limit: string + model_snapshot_retention_days: integer + daily_model_snapshot_retention_after_days: integer +} + +export interface MlInfoDatafeeds { + scroll_size: integer +} + +export interface MlInfoDefaults { + anomaly_detectors: MlInfoAnomalyDetectors + datafeeds: MlInfoDatafeeds +} + +export interface MlInfoLimits { + max_single_ml_node_processors?: integer + total_ml_processors?: integer + max_model_memory_limit?: ByteSize + effective_max_model_memory_limit?: ByteSize + total_ml_memory: ByteSize +} + +export interface MlInfoNativeCode { + build_hash: string + version: VersionString +} + +export interface MlInfoRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface MlInfoResponse { + defaults: MlInfoDefaults + limits: MlInfoLimits + upgrade_mode: boolean + native_code: MlInfoNativeCode +} + +export interface MlOpenJobRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** Refer to the description for the `timeout` query parameter. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, timeout?: never } +} + +export interface MlOpenJobResponse { + opened: boolean + /** The ID of the node that the job was started on. In serverless, this value will be "serverless". + * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */ + node: NodeId +} + +export interface MlPostCalendarEventsRequest extends RequestBase { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. */ + events: MlCalendarEvent[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, events?: never } + /** All values in `querystring` will be added to the request querystring. 
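// Editor's sketch: running inference against a deployed model using the request
// shape above. The model ID is hypothetical; NLP models typically take `text_field`.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

const inference = await client.ml.inferTrainedModel({
  model_id: 'my-nlp-model',
  docs: [{ text_field: 'Elasticsearch is a distributed search engine.' }],
  timeout: '30s'
})
console.log(inference.inference_results)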
*/ + querystring?: { [key: string]: any } & { calendar_id?: never, events?: never } +} + +export interface MlPostCalendarEventsResponse { + events: MlCalendarEvent[] +} + +export interface MlPostDataRequest<TData = unknown> extends RequestBase { + /** Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. */ + job_id: Id + /** Specifies the end of the bucket resetting range. */ + reset_end?: DateTime + /** Specifies the start of the bucket resetting range. */ + reset_start?: DateTime + data?: TData[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, reset_end?: never, reset_start?: never, data?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, reset_end?: never, reset_start?: never, data?: never } +} + +export interface MlPostDataResponse { + job_id: Id + processed_record_count: long + processed_field_count: long + input_bytes: long + input_field_count: long + invalid_date_count: long + missing_field_count: long + out_of_order_timestamp_count: long + empty_bucket_count: long + sparse_bucket_count: long + bucket_count: long + earliest_record_timestamp?: EpochTime<UnitMillis> + latest_record_timestamp?: EpochTime<UnitMillis> + last_data_time?: EpochTime<UnitMillis> + latest_empty_bucket_timestamp?: EpochTime<UnitMillis> + latest_sparse_bucket_timestamp?: EpochTime<UnitMillis> + input_record_count: long + log_time?: EpochTime<UnitMillis> +} + +export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { + source: MlDataframeAnalyticsSource + analysis: MlDataframeAnalysisContainer + model_memory_limit?: string + max_num_threads?: integer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] +} + +export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. */ + id?: Id + /** A data frame analytics config as described in create data frame analytics + * jobs. Note that `id` and `dest` don’t need to be provided in the context of + * this API. */ + config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, config?: never } +} + +export interface MlPreviewDataFrameAnalyticsResponse { + /** An array of objects that contain feature name and value pairs. The features have been processed and indicate what will be sent to the model for training. */ + feature_values: Record<Field, string>[] +} + +export interface MlPreviewDatafeedRequest extends RequestBase { + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase + * alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric + * characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job + * configuration details in the request body. */ + datafeed_id?: Id + /** The start time from where the datafeed preview should begin */ + start?: DateTime + /** The end time when the datafeed preview should stop */ + end?: DateTime + /** The datafeed definition to preview. */ + datafeed_config?: MlDatafeedConfig + /** The configuration details for the anomaly detection job that is associated with the datafeed. 
If the + * `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must + * supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is + * used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. */ + job_config?: MlJobConfig + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, start?: never, end?: never, datafeed_config?: never, job_config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, start?: never, end?: never, datafeed_config?: never, job_config?: never } +} + +export type MlPreviewDatafeedResponse<TDocument = unknown> = TDocument[] + +export interface MlPutCalendarRequest extends RequestBase { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** An array of anomaly detection job identifiers. */ + job_ids?: Id[] + /** A description of the calendar. */ + description?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, job_ids?: never, description?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, job_ids?: never, description?: never } +} + +export interface MlPutCalendarResponse { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** A description of the calendar. */ + description?: string + /** A list of anomaly detection job identifiers or group names. */ + job_ids: Ids +} + +export interface MlPutCalendarJobRequest extends RequestBase { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. */ + job_id: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { calendar_id?: never, job_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { calendar_id?: never, job_id?: never } +} + +export interface MlPutCalendarJobResponse { + /** A string that uniquely identifies a calendar. */ + calendar_id: Id + /** A description of the calendar. */ + description?: string + /** A list of anomaly detection job identifiers or group names. */ + job_ids: Ids +} + +export interface MlPutDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ + id: Id + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. If + * set to `false` and a machine learning node with capacity to run the job + * cannot be immediately found, the API returns an error. If set to `true`, + * the API does not return an error; the job waits in the `starting` state + * until sufficient machine learning node capacity is available. This + * behavior is also affected by the cluster-wide + * `xpack.ml.max_lazy_ml_nodes` setting. 
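// Editor's sketch: previewing a datafeed that has not been created yet by
// supplying both `datafeed_config` and `job_config` inline, as the comment
// above requires. Index, field, and time-field names are hypothetical.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

const preview = await client.ml.previewDatafeed({
  datafeed_config: {
    indices: ['my-metrics-*'],
    query: { match_all: {} }
  },
  job_config: {
    analysis_config: {
      bucket_span: '15m',
      detectors: [{ function: 'mean', field_name: 'cpu.usage' }]
    },
    data_description: { time_field: '@timestamp' }
  }
})
console.log(preview)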
*/ + allow_lazy_start?: boolean + /** The analysis configuration, which contains the information necessary to + * perform one of the following types of analysis: classification, outlier + * detection, or regression. */ + analysis: MlDataframeAnalysisContainer + /** Specifies `includes` and/or `excludes` patterns to select which fields + * will be included in the analysis. The patterns specified in `excludes` + * are applied last, therefore `excludes` takes precedence. In other words, + * if the same field is specified in both `includes` and `excludes`, then + * the field will not be included in the analysis. If `analyzed_fields` is + * not set, only the relevant fields will be included. For example, all the + * numeric fields for outlier detection. + * The supported fields vary for each type of analysis. Outlier detection + * requires numeric or `boolean` data to analyze. The algorithms don’t + * support missing values therefore fields that have data types other than + * numeric or boolean are ignored. Documents where included fields contain + * missing values, null values, or an array are also ignored. Therefore the + * `dest` index may contain documents that don’t have an outlier score. + * Regression supports fields that are numeric, `boolean`, `text`, + * `keyword`, and `ip` data types. It is also tolerant of missing values. + * Fields that are supported are included in the analysis, other fields are + * ignored. Documents where included fields contain an array with two or + * more values are also ignored. Documents in the `dest` index that don’t + * contain a results field are not included in the regression analysis. + * Classification supports fields that are numeric, `boolean`, `text`, + * `keyword`, and `ip` data types. It is also tolerant of missing values. + * Fields that are supported are included in the analysis, other fields are + * ignored. Documents where included fields contain an array with two or + * more values are also ignored. Documents in the `dest` index that don’t + * contain a results field are not included in the classification analysis. + * Classification analysis can be improved by mapping ordinal variable + * values to a single number. For example, in case of age ranges, you can + * model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. */ + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + /** A description of the job. */ + description?: string + /** The destination configuration. */ + dest: MlDataframeAnalyticsDestination + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ + max_num_threads?: integer + _meta?: Metadata + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try + * to create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ + model_memory_limit?: string + /** The configuration of how to source the analysis data. */ + source: MlDataframeAnalyticsSource + headers?: HttpHeaders + version?: VersionString + /** All values in `body` will be added to the request body. 
*/ + body?: string | { [key: string]: any } & { id?: never, allow_lazy_start?: never, analysis?: never, analyzed_fields?: never, description?: never, dest?: never, max_num_threads?: never, _meta?: never, model_memory_limit?: never, source?: never, headers?: never, version?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_lazy_start?: never, analysis?: never, analyzed_fields?: never, description?: never, dest?: never, max_num_threads?: never, _meta?: never, model_memory_limit?: never, source?: never, headers?: never, version?: never } +} + +export interface MlPutDataFrameAnalyticsResponse { + authorization?: MlDataframeAnalyticsAuthorization + allow_lazy_start: boolean + analysis: MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + create_time: EpochTime<UnitMillis> + description?: string + dest: MlDataframeAnalyticsDestination + id: Id + max_num_threads: integer + _meta?: Metadata + model_memory_limit: string + source: MlDataframeAnalyticsSource + version: VersionString +} + +export interface MlPutDatafeedRequest extends RequestBase { + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ + datafeed_id: Id + /** If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` + * string or when no indices are specified. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ + expand_wildcards?: ExpandWildcards + /** If true, concrete, expanded, or aliased indices are ignored when frozen. */ + ignore_throttled?: boolean + /** If true, unavailable indices (missing or closed) are ignored. */ + ignore_unavailable?: boolean + /** If set, the datafeed performs aggregation searches. + * Support for aggregations is limited and should be used only with low cardinality data. */ + aggregations?: Record<string, AggregationsAggregationContainer> + /** If set, the datafeed performs aggregation searches. + * Support for aggregations is limited and should be used only with low cardinality data. + * @alias aggregations */ + aggs?: Record<string, AggregationsAggregationContainer> + /** Datafeeds might be required to search over long time periods, for several months or years. + * This search is split into time chunks in order to ensure the load on Elasticsearch is managed. + * Chunking configuration controls how the size of these time chunks are calculated; + * it is an advanced configuration option. */ + chunking_config?: MlChunkingConfig + /** Specifies whether the datafeed checks for missing data and the size of the window. + * The datafeed can optionally search over indices that have already been read in an effort to determine whether + * any data has subsequently been added to the index. If missing data is found, it is a good indication that the + * `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. + * This check runs only on real-time datafeeds. */ + delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. 
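// Editor's sketch: creating an outlier detection data frame analytics job with
// the request type above. The job ID and index names are hypothetical.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.ml.putDataFrameAnalytics({
  id: 'my-outlier-job',
  source: { index: 'my-source-index' },
  dest: { index: 'my-outlier-results' },
  analysis: { outlier_detection: {} },
  model_memory_limit: '50mb'
})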
+ * The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible + * fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last + * (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses + * aggregations, this value must be divisible by the interval of the date histogram aggregation. */ + frequency?: Duration + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master + * nodes and the machine learning nodes must have the `remote_cluster_client` role. */ + indices?: Indices + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master + * nodes and the machine learning nodes must have the `remote_cluster_client` role. + * @alias indices */ + indexes?: Indices + /** Specifies index expansion options that are used during search */ + indices_options?: IndicesOptions + /** Identifier for the anomaly detection job. */ + job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically + * stops and closes the associated job after this many real-time searches return no documents. In other words, + * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no + * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ + max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an + * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this + * object is passed verbatim to Elasticsearch. */ + query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might + * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default + * value is randomly selected between `60s` and `120s`. This randomness improves the query performance + * when there are multiple jobs running on the same node. */ + query_delay?: Duration + /** Specifies runtime fields for the datafeed search. */ + runtime_mappings?: MappingRuntimeFields + /** Specifies scripts that evaluate custom expressions and return script fields to the datafeed. + * The detector configuration objects in a job can contain functions that use these script fields. */ + script_fields?: Record<string, ScriptField> + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. + * The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ + scroll_size?: integer + headers?: HttpHeaders + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, aggs?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never, headers?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, aggs?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never, headers?: never } +} + +export interface MlPutDatafeedResponse { + aggregations?: Record<string, AggregationsAggregationContainer> + authorization?: MlDatafeedAuthorization + chunking_config: MlChunkingConfig + delayed_data_check_config?: MlDelayedDataCheckConfig + datafeed_id: Id + frequency?: Duration + indices: string[] + job_id: Id + indices_options?: IndicesOptions + max_empty_searches?: integer + query: QueryDslQueryContainer + query_delay: Duration + runtime_mappings?: MappingRuntimeFields + script_fields?: Record<string, ScriptField> + scroll_size: integer +} + +export interface MlPutFilterRequest extends RequestBase { + /** A string that uniquely identifies a filter. */ + filter_id: Id + /** A description of the filter. */ + description?: string + /** The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. + * Up to 10000 items are allowed in each filter. */ + items?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never, description?: never, items?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never, description?: never, items?: never } +} + +export interface MlPutFilterResponse { + description: string + filter_id: Id + items: string[] +} + +export interface MlPutJobRequest extends RequestBase { + /** The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + job_id: Id + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + * `_all` string or when no indices are specified. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ + expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ + ignore_throttled?: boolean + /** If `true`, unavailable indices (missing or closed) are ignored. */ + ignore_unavailable?: boolean + /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. */ + allow_lazy_open?: boolean + /** Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. 
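// Editor's sketch: creating a datafeed for an existing anomaly detection job.
// The datafeed and job IDs and the index pattern are hypothetical.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.ml.putDatafeed({
  datafeed_id: 'datafeed-my-anomaly-job',
  job_id: 'my-anomaly-job',
  indices: ['my-metrics-*'],
  query: { match_all: {} },
  query_delay: '90s',
  scroll_size: 1000
})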
*/ + analysis_config: MlAnalysisConfig + /** Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ + analysis_limits?: MlAnalysisLimits + /** Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. */ + background_persist_interval?: Duration + /** Advanced configuration option. Contains custom metadata about the job. */ + custom_settings?: MlCustomSettings + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. */ + daily_model_snapshot_retention_after_days?: long + /** Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. */ + data_description: MlDataDescription + /** Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. */ + datafeed_config?: MlDatafeedConfig + /** A description of the job. */ + description?: string + /** A list of job groups. A job can belong to no groups or many. */ + groups?: string[] + /** This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot, it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. */ + model_plot_config?: MlModelPlotConfig + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. */ + model_snapshot_retention_days?: long + /** Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. 
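// Editor's sketch: a minimal anomaly detection job built from the request type
// above; only `job_id`, `analysis_config`, and `data_description` are supplied.
// The job ID and field names are hypothetical.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.ml.putJob({
  job_id: 'my-anomaly-job',
  analysis_config: {
    bucket_span: '15m',
    detectors: [{ function: 'mean', field_name: 'cpu.usage' }]
  },
  data_description: { time_field: '@timestamp' }
})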
*/ + renormalization_window_days?: long + /** A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. */ + results_index_name?: IndexName + /** Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. */ + results_retention_days?: long + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_lazy_open?: never, analysis_config?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, daily_model_snapshot_retention_after_days?: never, data_description?: never, datafeed_config?: never, description?: never, groups?: never, model_plot_config?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_index_name?: never, results_retention_days?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, allow_lazy_open?: never, analysis_config?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, daily_model_snapshot_retention_after_days?: never, data_description?: never, datafeed_config?: never, description?: never, groups?: never, model_plot_config?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_index_name?: never, results_retention_days?: never } +} + +export interface MlPutJobResponse { + allow_lazy_open: boolean + analysis_config: MlAnalysisConfigRead + analysis_limits: MlAnalysisLimits + background_persist_interval?: Duration + create_time: DateTime + custom_settings?: MlCustomSettings + daily_model_snapshot_retention_after_days: long + data_description: MlDataDescription + datafeed_config?: MlDatafeed + description?: string + groups?: string[] + job_id: Id + job_type: string + job_version: string + model_plot_config?: MlModelPlotConfig + model_snapshot_id?: Id + model_snapshot_retention_days: long + renormalization_window_days?: long + results_index_name: string + results_retention_days?: long +} + +export interface MlPutTrainedModelAggregateOutput { + logistic_regression?: MlPutTrainedModelWeights + weighted_sum?: MlPutTrainedModelWeights + weighted_mode?: MlPutTrainedModelWeights + exponent?: MlPutTrainedModelWeights +} + +export interface MlPutTrainedModelDefinition { + /** Collection of preprocessors */ + preprocessors?: MlPutTrainedModelPreprocessor[] + /** The definition of the trained model. 
*/ + trained_model: MlPutTrainedModelTrainedModel +} + +export interface MlPutTrainedModelEnsemble { + aggregate_output?: MlPutTrainedModelAggregateOutput + classification_labels?: string[] + feature_names?: string[] + target_type?: string + trained_models: MlPutTrainedModelTrainedModel[] +} + +export interface MlPutTrainedModelFrequencyEncodingPreprocessor { + field: string + feature_name: string + frequency_map: Record<string, double> +} + +export interface MlPutTrainedModelInput { + field_names: Names +} + +export interface MlPutTrainedModelOneHotEncodingPreprocessor { + field: string + hot_map: Record<string, string> +} + +export interface MlPutTrainedModelPreprocessor { + frequency_encoding?: MlPutTrainedModelFrequencyEncodingPreprocessor + one_hot_encoding?: MlPutTrainedModelOneHotEncodingPreprocessor + target_mean_encoding?: MlPutTrainedModelTargetMeanEncodingPreprocessor +} + +export interface MlPutTrainedModelRequest extends RequestBase { + /** The unique identifier of the trained model. */ + model_id: Id + /** If set to `true` and a `compressed_definition` is provided, + * the request defers definition decompression and skips relevant + * validations. */ + defer_definition_decompression?: boolean + /** Whether to wait for all child operations (e.g. model download) + * to complete. */ + wait_for_completion?: boolean + /** The compressed (GZipped and Base64 encoded) inference definition of the + * model. If compressed_definition is specified, then definition cannot be + * specified. */ + compressed_definition?: string + /** The inference definition for the model. If definition is specified, then + * compressed_definition cannot be specified. */ + definition?: MlPutTrainedModelDefinition + /** A human-readable description of the inference trained model. */ + description?: string + /** The default configuration for inference. This can be either a regression + * or classification configuration. It must match the underlying + * definition.trained_model's target_type. For pre-packaged models such as + * ELSER the config is not required. */ + inference_config?: MlInferenceConfigCreateContainer + /** The input field names for the model definition. */ + input?: MlPutTrainedModelInput + /** An object map that contains metadata about the model. */ + metadata?: any + /** The model type. */ + model_type?: MlTrainedModelType + /** The estimated memory usage in bytes to keep the trained model in memory. + * This property is supported only if defer_definition_decompression is true + * or the model definition is not supplied. */ + model_size_bytes?: long + /** The platform architecture (if applicable) of the trained model. If the model + * only works on one platform, because it is heavily optimized for a particular + * processor architecture and OS combination, then this field specifies which. + * The format of the string must match the platform identifiers used by Elasticsearch, + * so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, + * or `windows-x86_64`. For portable models (those that work independent of processor + * architecture or OS features), leave this field unset. */ + platform_architecture?: string + /** An array of tags to organize the model. */ + tags?: string[] + /** Optional prefix strings applied at inference */ + prefix_strings?: MlTrainedModelPrefixStrings + /** All values in `body` will be added to the request body. 
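// Editor's sketch: a tiny hand-rolled regression tree expressed with the
// tree and tree-node types defined just below: one split node and two leaves.
// The model ID and feature name are hypothetical.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.ml.putTrainedModel({
  model_id: 'my-tiny-tree',
  inference_config: { regression: {} },
  input: { field_names: ['x'] },
  definition: {
    trained_model: {
      tree: {
        feature_names: ['x'],
        target_type: 'regression',
        tree_structure: [
          // Non-leaf node: needs split_feature, children, threshold, and so on.
          { node_index: 0, split_feature: 0, threshold: 0.5, decision_type: 'lt', default_left: true, left_child: 1, right_child: 2 },
          // Leaf nodes: only node_index and leaf_value.
          { node_index: 1, leaf_value: 0 },
          { node_index: 2, leaf_value: 1 }
        ]
      }
    }
  }
})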
*/ + body?: string | { [key: string]: any } & { model_id?: never, defer_definition_decompression?: never, wait_for_completion?: never, compressed_definition?: never, definition?: never, description?: never, inference_config?: never, input?: never, metadata?: never, model_type?: never, model_size_bytes?: never, platform_architecture?: never, tags?: never, prefix_strings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, defer_definition_decompression?: never, wait_for_completion?: never, compressed_definition?: never, definition?: never, description?: never, inference_config?: never, input?: never, metadata?: never, model_type?: never, model_size_bytes?: never, platform_architecture?: never, tags?: never, prefix_strings?: never } +} + +export type MlPutTrainedModelResponse = MlTrainedModelConfig + +export interface MlPutTrainedModelTargetMeanEncodingPreprocessor { + field: string + feature_name: string + target_map: Record<string, double> + default_value: double +} + +export interface MlPutTrainedModelTrainedModel { + /** The definition for a binary decision tree. */ + tree?: MlPutTrainedModelTrainedModelTree + /** The definition of a node in a tree. + * There are two major types of nodes: leaf nodes and non-leaf nodes. + * - Leaf nodes only need node_index and leaf_value defined. + * - All other nodes need split_feature, left_child, right_child, threshold, decision_type, and default_left defined. */ + tree_node?: MlPutTrainedModelTrainedModelTreeNode + /** The definition for an ensemble model */ + ensemble?: MlPutTrainedModelEnsemble +} + +export interface MlPutTrainedModelTrainedModelTree { + classification_labels?: string[] + feature_names: string[] + target_type?: string + tree_structure: MlPutTrainedModelTrainedModelTreeNode[] +} + +export interface MlPutTrainedModelTrainedModelTreeNode { + decision_type?: string + default_left?: boolean + leaf_value?: double + left_child?: integer + node_index: integer + right_child?: integer + split_feature?: integer + split_gain?: integer + threshold?: double +} + +export interface MlPutTrainedModelWeights { + weights: double +} + +export interface MlPutTrainedModelAliasRequest extends RequestBase { + /** The alias to create or update. This value cannot end in numbers. */ + model_alias: Name + /** The identifier for the trained model that the alias refers to. */ + model_id: Id + /** Specifies whether the alias gets reassigned to the specified trained + * model if it is already assigned to a different model. If the alias is + * already assigned and this parameter is false, the API returns an error. */ + reassign?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_alias?: never, model_id?: never, reassign?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_alias?: never, model_id?: never, reassign?: never } +} + +export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase + +export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { + /** The unique identifier of the trained model. */ + model_id: Id + /** The definition part number. When the definition is loaded for inference the definition parts are streamed in the + * order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. */ + part: integer + /** The definition part for the model. 
Must be a base64 encoded string. */ + definition: string + /** The total uncompressed definition length in bytes. Not base64 encoded. */ + total_definition_length: long + /** The total number of parts that will be uploaded. Must be greater than 0. */ + total_parts: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, part?: never, definition?: never, total_definition_length?: never, total_parts?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, part?: never, definition?: never, total_definition_length?: never, total_parts?: never } +} + +export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase + +export interface MlPutTrainedModelVocabularyRequest extends RequestBase { + /** The unique identifier of the trained model. */ + model_id: Id + /** The model vocabulary, which must not be empty. */ + vocabulary: string[] + /** The optional model merges if required by the tokenizer. */ + merges?: string[] + /** The optional vocabulary value scores if required by the tokenizer. */ + scores?: double[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, vocabulary?: never, merges?: never, scores?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, vocabulary?: never, merges?: never, scores?: never } +} + +export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase + +export interface MlResetJobRequest extends RequestBase { + /** The ID of the job to reset. */ + job_id: Id + /** Should this request wait until the operation has completed before + * returning. */ + wait_for_completion?: boolean + /** Specifies whether annotations that have been added by the + * user should be deleted along with any auto-generated annotations when the job is + * reset. */ + delete_user_annotations?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, wait_for_completion?: never, delete_user_annotations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, wait_for_completion?: never, delete_user_annotations?: never } +} + +export type MlResetJobResponse = AcknowledgedResponseBase + +export interface MlRevertModelSnapshotRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** You can specify `empty` as the `snapshot_id`. Reverting to the empty + * snapshot means the anomaly detection job starts learning a new model from + * scratch when it is started. */ + snapshot_id: Id + /** Refer to the description for the `delete_intervening_results` query parameter. */ + delete_intervening_results?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, delete_intervening_results?: never } + /** All values in `querystring` will be added to the request querystring. 
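// Editor's sketch: streaming a compressed model definition in parts with the
// request type above. `modelId`, `base64Parts`, and `totalLength` are
// hypothetical inputs; parts must be numbered 0 .. total_parts - 1.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

async function uploadDefinition (modelId: string, base64Parts: string[], totalLength: number): Promise<void> {
  for (let part = 0; part < base64Parts.length; part++) {
    await client.ml.putTrainedModelDefinitionPart({
      model_id: modelId,
      part,
      definition: base64Parts[part],
      total_definition_length: totalLength,
      total_parts: base64Parts.length
    })
  }
}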
*/ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, delete_intervening_results?: never } +} + +export interface MlRevertModelSnapshotResponse { + model: MlModelSnapshot +} + +export interface MlSetUpgradeModeRequest extends RequestBase { + /** When `true`, it enables `upgrade_mode` which temporarily halts all job + * and datafeed tasks and prohibits new job and datafeed tasks from + * starting. */ + enabled?: boolean + /** The time to wait for the request to be completed. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { enabled?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { enabled?: never, timeout?: never } +} + +export type MlSetUpgradeModeResponse = AcknowledgedResponseBase + +export interface MlStartDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ + id: Id + /** Controls the amount of time to wait until the data frame analytics job + * starts. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, timeout?: never } +} + +export interface MlStartDataFrameAnalyticsResponse { + acknowledged: boolean + /** The ID of the node that the job has been assigned to, or an empty string + * if the job is allowed to open lazily and has not yet been assigned to a node. + * In serverless, if the job has been assigned to run, the node ID will be "serverless". */ + node: NodeId +} + +export interface MlStartDatafeedRequest extends RequestBase { + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase + * alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric + * characters. */ + datafeed_id: Id + /** Refer to the description for the `end` query parameter. */ + end?: DateTime + /** Refer to the description for the `start` query parameter. */ + start?: DateTime + /** Refer to the description for the `timeout` query parameter. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, end?: never, start?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, end?: never, start?: never, timeout?: never } +} + +export interface MlStartDatafeedResponse { + /** The ID of the node that the job was started on. In serverless, this value will be "serverless". + * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */ + node: NodeIds + /** For a successful response, this value is always `true`. On failure, an exception is returned instead. 
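// Editor's sketch: the usual open-then-start sequence using the types above.
// The job and datafeed IDs and the start time are hypothetical.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.ml.openJob({ job_id: 'my-anomaly-job' })
const { started, node } = await client.ml.startDatafeed({
  datafeed_id: 'datafeed-my-anomaly-job',
  start: '2025-01-01T00:00:00Z'
})
console.log(started, node)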
*/ + started: boolean +} + +export interface MlStartTrainedModelDeploymentRequest extends RequestBase { + /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ + model_id: Id + /** The inference cache size (in memory outside the JVM heap) per node for the model. + * The default value is the same size as the `model_size_bytes`. To disable the cache, + * `0b` can be provided. */ + cache_size?: ByteSize + /** A unique identifier for the deployment of the model. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + deployment_id?: string + /** The number of model allocations on each node where the model is deployed. + * All allocations on a node share the same copy of the model in memory but use + * a separate set of threads to evaluate the model. + * Increasing this value generally increases the throughput. + * If this setting is greater than the number of hardware threads + * it will automatically be changed to a value less than the number of hardware threads. + * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ + number_of_allocations?: integer + /** The deployment priority. */ + priority?: MlTrainingPriority + /** Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds + * this value, new requests are rejected with a 429 error. */ + queue_capacity?: integer + /** Sets the number of threads used by each model allocation during inference. This generally increases + * the inference speed. The inference process is a compute-bound process; any number + * greater than the number of available hardware threads on the machine does not increase the + * inference speed. If this setting is greater than the number of hardware threads + * it will automatically be changed to a value less than the number of hardware threads. */ + threads_per_allocation?: integer + /** Specifies the amount of time to wait for the model to deploy. */ + timeout?: Duration + /** Specifies the allocation status to wait for before returning. */ + wait_for?: MlDeploymentAllocationState + /** Adaptive allocations configuration. When enabled, the number of allocations + * is set based on the current load. + * If adaptive_allocations is enabled, do not set the number of allocations manually. */ + adaptive_allocations?: MlAdaptiveAllocationsSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never, adaptive_allocations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never, adaptive_allocations?: never } +} + +export interface MlStartTrainedModelDeploymentResponse { + assignment: MlTrainedModelAssignment +} + +export interface MlStopDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. 
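// Editor's sketch: deploying a trained model with adaptive allocations, per the
// request type above. The model ID and the allocation bounds are hypothetical,
// and the adaptive_allocations field names are assumed from MlAdaptiveAllocationsSettings.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

const { assignment } = await client.ml.startTrainedModelDeployment({
  model_id: 'my-nlp-model',
  threads_per_allocation: 2,
  wait_for: 'started',
  adaptive_allocations: {
    enabled: true,
    min_number_of_allocations: 1,
    max_number_of_allocations: 4
  }
})
console.log(assignment)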
*/ + id: Id + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is true, which returns an empty data_frame_analytics + * array when there are no matches and the subset of results when there are + * partial matches. If this parameter is false, the request returns a 404 + * status code when there are no matches or only partial matches. */ + allow_no_match?: boolean + /** If true, the data frame analytics job is stopped forcefully. */ + force?: boolean + /** Controls the amount of time to wait until the data frame analytics job + * stops. Defaults to 20 seconds. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, allow_no_match?: never, force?: never, timeout?: never } +} + +export interface MlStopDataFrameAnalyticsResponse { + stopped: boolean +} + +export interface MlStopDatafeedRequest extends RequestBase { + /** Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated + * list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as + * the identifier. */ + datafeed_id: Id + /** Refer to the description for the `allow_no_match` query parameter. */ + allow_no_match?: boolean + /** Refer to the description for the `force` query parameter. */ + force?: boolean + /** Refer to the description for the `timeout` query parameter. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, force?: never, timeout?: never } +} + +export interface MlStopDatafeedResponse { + stopped: boolean +} + +export interface MlStopTrainedModelDeploymentRequest extends RequestBase { + /** The unique identifier of the trained model. */ + model_id: Id + /** Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; + * contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and + * there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. + * If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ + allow_no_match?: boolean + /** Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you + * restart the model deployment. */ + force?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, force?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, force?: never } +} + +export interface MlStopTrainedModelDeploymentResponse { + stopped: boolean +} + +export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ + id: Id + /** A description of the job. */ + description?: string + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try + * to create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ + model_memory_limit?: string + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ + max_num_threads?: integer + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. */ + allow_lazy_start?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, allow_lazy_start?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, allow_lazy_start?: never } +} + +export interface MlUpdateDataFrameAnalyticsResponse { + authorization?: MlDataframeAnalyticsAuthorization + allow_lazy_start: boolean + analysis: MlDataframeAnalysisContainer + analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + create_time: long + description?: string + dest: MlDataframeAnalyticsDestination + id: Id + max_num_threads: integer + model_memory_limit: string + source: MlDataframeAnalyticsSource + version: VersionString +} + +export interface MlUpdateDatafeedRequest extends RequestBase { + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ + datafeed_id: Id + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + * `_all` string or when no indices are specified. */ + allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ + expand_wildcards?: ExpandWildcards + /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ + ignore_throttled?: boolean + /** If `true`, unavailable indices (missing or closed) are ignored. */ + ignore_unavailable?: boolean + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only + * with low cardinality data. 
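// Editor's sketch: updating the mutable settings of an existing data frame
// analytics job; only the fields in the request type above can change. The job
// ID and values are hypothetical.
import { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.ml.updateDataFrameAnalytics({
  id: 'my-outlier-job',
  description: 'Nightly outlier detection',
  model_memory_limit: '100mb',
  max_num_threads: 2,
  allow_lazy_start: true
})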
*/ + aggregations?: Record<string, AggregationsAggregationContainer> + /** Datafeeds might search over long time periods, for several months or years. This search is split into time + * chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of + * these time chunks are calculated; it is an advanced configuration option. */ + chunking_config?: MlChunkingConfig + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally + * search over indices that have already been read in an effort to determine whether any data has subsequently been + * added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and + * the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time + * datafeeds. */ + delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is + * either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket + * span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are + * written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value + * must be divisible by the interval of the date histogram aggregation. */ + frequency?: Duration + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine + * learning nodes must have the `remote_cluster_client` role. */ + indices?: string[] + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine + * learning nodes must have the `remote_cluster_client` role. + * @alias indices */ + indexes?: string[] + /** Specifies index expansion options that are used during search. */ + indices_options?: IndicesOptions + job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically + * stops and closes the associated job after this many real-time searches return no documents. In other words, + * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no + * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ + max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an + * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this + * object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also + * changed. Therefore, the time required to learn might be long and the understandability of the results is + * unpredictable. If you want to make significant changes to the source data, it is recommended that you + * clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one + * when you are satisfied with the results of the job. */ + query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might + * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default + * value is randomly selected between `60s` and `120s`. 
+ +export interface MlUpdateDatafeedResponse { + authorization?: MlDatafeedAuthorization + aggregations?: Record<string, AggregationsAggregationContainer> + chunking_config: MlChunkingConfig + delayed_data_check_config?: MlDelayedDataCheckConfig + datafeed_id: Id + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ + frequency?: Duration + indices: string[] + indices_options?: IndicesOptions + job_id: Id + max_empty_searches?: integer + query: QueryDslQueryContainer + query_delay: Duration + runtime_mappings?: MappingRuntimeFields + script_fields?: Record<string, ScriptField> + scroll_size: integer +} + +export interface MlUpdateFilterRequest extends RequestBase { + /** A string that uniquely identifies a filter. */ + filter_id: Id + /** The items to add to the filter. */ + add_items?: string[] + /** A description for the filter. */ + description?: string + /** The items to remove from the filter. */ + remove_items?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { filter_id?: never, add_items?: never, description?: never, remove_items?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { filter_id?: never, add_items?: never, description?: never, remove_items?: never } +}
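+ +/* Usage sketch (illustrative): adding and removing filter items; assumes the `client` instance from the first sketch and placeholder item values. + + const resp = await client.ml.updateFilter({ filter_id: 'safe_domains', add_items: ['*.elastic.co'], remove_items: ['www.example.com'] }) + console.log(resp.items) +*/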
+ +export interface MlUpdateFilterResponse { + description: string + filter_id: Id + items: string[] +} + +export interface MlUpdateJobRequest extends RequestBase { + /** Identifier for the job. */ + job_id: Id + /** Advanced configuration option. Specifies whether this job can open when + * there is insufficient machine learning node capacity for it to be + * immediately assigned to a node. If `false` and a machine learning node + * with capacity to run the job cannot immediately be found, the open + * anomaly detection jobs API returns an error. However, this is also + * subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this + * option is set to `true`, the open anomaly detection jobs API does not + * return an error and the job waits in the opening state until sufficient + * machine learning node capacity is available. */ + allow_lazy_open?: boolean + analysis_limits?: MlAnalysisMemoryLimit + /** Advanced configuration option. The time between each periodic persistence + * of the model. + * The default value is a randomized value between 3 to 4 hours, which + * avoids all jobs persisting at exactly the same time. The smallest allowed + * value is 1 hour. + * For very large models (several GB), persistence could take 10-20 minutes, + * so do not set the value too low. + * If the job is open when you make the update, you must stop the datafeed, + * close the job, then reopen the job and restart the datafeed for the + * changes to take effect. */ + background_persist_interval?: Duration + /** Advanced configuration option. Contains custom metadata about the job. + * For example, it can contain custom URL information as shown in Adding + * custom URLs to machine learning results. */ + custom_settings?: Record<string, any> + categorization_filters?: string[] + /** A description of the job. */ + description?: string + model_plot_config?: MlModelPlotConfig + model_prune_window?: Duration + /** Advanced configuration option, which affects the automatic removal of old + * model snapshots for this job. It specifies a period of time (in days) + * after which only the first snapshot per day is retained. This period is + * relative to the timestamp of the most recent snapshot for this job. Valid + * values range from 0 to `model_snapshot_retention_days`. For jobs created + * before version 7.8.0, the default value matches + * `model_snapshot_retention_days`. */ + daily_model_snapshot_retention_after_days?: long + /** Advanced configuration option, which affects the automatic removal of old + * model snapshots for this job. It specifies the maximum period of time (in + * days) that snapshots are retained. This period is relative to the + * timestamp of the most recent snapshot for this job. */ + model_snapshot_retention_days?: long + /** Advanced configuration option. The period over which adjustments to the + * score are applied, as new data is seen. */ + renormalization_window_days?: long + /** Advanced configuration option. The period of time (in days) that results + * are retained. Age is calculated relative to the timestamp of the latest + * bucket result. If this property has a non-null value, once per day at + * 00:30 (server time), results that are the specified number of days older + * than the latest bucket result are deleted from Elasticsearch. The default + * value is null, which means all results are retained. */ + results_retention_days?: long + /** A list of job groups. A job can belong to no groups or many. */ + groups?: string[] + /** An array of detector update objects. */ + detectors?: MlDetectorUpdate[] + /** Settings related to how categorization interacts with partition fields. */ + per_partition_categorization?: MlPerPartitionCategorization + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, allow_lazy_open?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, categorization_filters?: never, description?: never, model_plot_config?: never, model_prune_window?: never, daily_model_snapshot_retention_after_days?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_retention_days?: never, groups?: never, detectors?: never, per_partition_categorization?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, allow_lazy_open?: never, analysis_limits?: never, background_persist_interval?: never, custom_settings?: never, categorization_filters?: never, description?: never, model_plot_config?: never, model_prune_window?: never, daily_model_snapshot_retention_after_days?: never, model_snapshot_retention_days?: never, renormalization_window_days?: never, results_retention_days?: never, groups?: never, detectors?: never, per_partition_categorization?: never } +}
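+ +/* Usage sketch (illustrative): trimming result retention on an anomaly detection job; assumes the `client` instance from the first sketch and a placeholder job ID. + + const resp = await client.ml.updateJob({ job_id: 'low_request_rate', results_retention_days: 60 }) + console.log(resp.results_retention_days) +*/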
+ +export interface MlUpdateJobResponse { + allow_lazy_open: boolean + analysis_config: MlAnalysisConfigRead + analysis_limits: MlAnalysisLimits + background_persist_interval?: Duration + create_time: EpochTime<UnitMillis> + finished_time?: EpochTime<UnitMillis> + custom_settings?: Record<string, any> + daily_model_snapshot_retention_after_days: long + data_description: MlDataDescription + datafeed_config?: MlDatafeed + description?: string + groups?: string[] + job_id: Id + job_type: string + job_version: VersionString + model_plot_config?: MlModelPlotConfig + model_snapshot_id?: Id + model_snapshot_retention_days: long + renormalization_window_days?: long + results_index_name: IndexName + results_retention_days?: long +} + +export interface MlUpdateModelSnapshotRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** Identifier for the model snapshot. */ + snapshot_id: Id + /** A description of the model snapshot. */ + description?: string + /** If `true`, this snapshot will not be deleted during automatic cleanup of + * snapshots older than `model_snapshot_retention_days`. However, this + * snapshot will be deleted when the job is deleted. */ + retain?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, description?: never, retain?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, description?: never, retain?: never } +} + +export interface MlUpdateModelSnapshotResponse { + acknowledged: boolean + model: MlModelSnapshot +} + +export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase { + /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ + model_id: Id + /** The number of model allocations on each node where the model is deployed. + * All allocations on a node share the same copy of the model in memory but use + * a separate set of threads to evaluate the model. + * Increasing this value generally increases the throughput. + * If this setting is greater than the number of hardware threads, + * it will automatically be changed to a value less than the number of hardware threads. + * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ + number_of_allocations?: integer + /** Adaptive allocations configuration. When enabled, the number of allocations + * is set based on the current load. + * If adaptive_allocations is enabled, do not set the number of allocations manually. */ + adaptive_allocations?: MlAdaptiveAllocationsSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { model_id?: never, number_of_allocations?: never, adaptive_allocations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { model_id?: never, number_of_allocations?: never, adaptive_allocations?: never } +}
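+ +/* Usage sketch (illustrative): scaling a trained model deployment; assumes the `client` instance from the first sketch and a placeholder model ID for an already-deployed model. + + const resp = await client.ml.updateTrainedModelDeployment({ model_id: 'my-elser-model', number_of_allocations: 4 }) + console.log(resp.assignment.assignment_state) +*/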
+ +export interface MlUpdateTrainedModelDeploymentResponse { + assignment: MlTrainedModelAssignment +} + +export interface MlUpgradeJobSnapshotRequest extends RequestBase { + /** Identifier for the anomaly detection job. */ + job_id: Id + /** A numerical character string that uniquely identifies the model snapshot. */ + snapshot_id: Id + /** When true, the API won’t respond until the upgrade is complete. + * Otherwise, it responds as soon as the upgrade task is assigned to a node. */ + wait_for_completion?: boolean + /** Controls the time to wait for the request to complete. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, wait_for_completion?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, snapshot_id?: never, wait_for_completion?: never, timeout?: never } +} + +export interface MlUpgradeJobSnapshotResponse { + /** The ID of the node that the upgrade task was started on if it is still running. In serverless, this will be "serverless". */ + node: NodeId + /** When true, this means the task is complete. When false, it is still running. */ + completed: boolean +} + +export interface MlValidateRequest extends RequestBase { + job_id?: Id + analysis_config?: MlAnalysisConfig + analysis_limits?: MlAnalysisLimits + data_description?: MlDataDescription + description?: string + model_plot?: MlModelPlotConfig + model_snapshot_id?: Id + model_snapshot_retention_days?: long + results_index_name?: IndexName + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { job_id?: never, analysis_config?: never, analysis_limits?: never, data_description?: never, description?: never, model_plot?: never, model_snapshot_id?: never, model_snapshot_retention_days?: never, results_index_name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { job_id?: never, analysis_config?: never, analysis_limits?: never, data_description?: never, description?: never, model_plot?: never, model_snapshot_id?: never, model_snapshot_retention_days?: never, results_index_name?: never } +}
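+ +/* Usage sketch (illustrative): validating a job configuration before creating it; assumes the `client` instance from the first sketch and placeholder field names. + + const resp = await client.ml.validate({ analysis_config: { bucket_span: '15m', detectors: [{ function: 'mean', field_name: 'responsetime' }] }, data_description: { time_field: 'timestamp' } }) + console.log(resp.acknowledged) +*/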
+ +export type MlValidateResponse = AcknowledgedResponseBase + +export interface MlValidateDetectorRequest extends RequestBase { + detector?: MlDetector + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { detector?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { detector?: never } +} + +export type MlValidateDetectorResponse = AcknowledgedResponseBase + +export interface MonitoringBulkRequest<TDocument = unknown, TPartialDocument = unknown> extends RequestBase { + /** Default document type for items which don't provide one */ + type?: string + /** Identifier of the monitored system */ + system_id: string + /** API version of the monitored system */ + system_api_version: string + /** Collection interval (e.g., '10s' or '10000ms') of the payload */ + interval: Duration + operations?: (BulkOperationContainer | BulkUpdateAction<TDocument, TPartialDocument> | TDocument)[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { type?: never, system_id?: never, system_api_version?: never, interval?: never, operations?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { type?: never, system_id?: never, system_api_version?: never, interval?: never, operations?: never } +} + +export interface MonitoringBulkResponse { + error?: ErrorCause + /** True if there was an error */ + errors: boolean + /** Was collection disabled? */ + ignored: boolean + took: long +} + +export interface NodesAdaptiveSelection { + /** The exponentially weighted moving average queue size of search requests on the keyed node. */ + avg_queue_size?: long + /** The exponentially weighted moving average response time of search requests on the keyed node. */ + avg_response_time?: Duration + /** The exponentially weighted moving average response time, in nanoseconds, of search requests on the keyed node. */ + avg_response_time_ns?: long + /** The exponentially weighted moving average service time of search requests on the keyed node. */ + avg_service_time?: Duration + /** The exponentially weighted moving average service time, in nanoseconds, of search requests on the keyed node. */ + avg_service_time_ns?: long + /** The number of outstanding search requests to the keyed node from the node these stats are for. */ + outgoing_searches?: long + /** The rank of this node; used for shard selection when routing search requests. */ + rank?: string +} + +export interface NodesBreaker { + /** Estimated memory used for the operation. */ + estimated_size?: string + /** Estimated memory used, in bytes, for the operation. */ + estimated_size_in_bytes?: long + /** Memory limit for the circuit breaker. */ + limit_size?: string + /** Memory limit, in bytes, for the circuit breaker. */ + limit_size_in_bytes?: long + /** A constant that all estimates for the circuit breaker are multiplied with to calculate a final estimate. */ + overhead?: float + /** Total number of times the circuit breaker has been triggered and prevented an out of memory error. */ + tripped?: float +}
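+ +/* Usage sketch (illustrative): inspecting circuit-breaker trip counts per node; assumes the `client` instance from the first sketch. + + const stats = await client.nodes.stats({ metric: 'breaker' }) + for (const [id, node] of Object.entries(stats.nodes)) { + console.log(id, node.breakers?.parent?.tripped) + } +*/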
+ +export interface NodesCgroup { + /** Contains statistics about the `cpuacct` control group for the node. */ + cpuacct?: NodesCpuAcct + /** Contains statistics about the `cpu` control group for the node. */ + cpu?: NodesCgroupCpu + /** Contains statistics about the memory control group for the node. */ + memory?: NodesCgroupMemory +} + +export interface NodesCgroupCpu { + /** The `cpu` control group to which the Elasticsearch process belongs. */ + control_group?: string + /** The period of time, in microseconds, for how regularly all tasks in the same cgroup as the Elasticsearch process should have their access to CPU resources reallocated. */ + cfs_period_micros?: integer + /** The total amount of time, in microseconds, for which all tasks in the same cgroup as the Elasticsearch process can run during one period `cfs_period_micros`. */ + cfs_quota_micros?: integer + /** Contains CPU statistics for the node. */ + stat?: NodesCgroupCpuStat +} + +export interface NodesCgroupCpuStat { + /** The number of reporting periods (as specified by `cfs_period_micros`) that have elapsed. */ + number_of_elapsed_periods?: long + /** The number of times all tasks in the same cgroup as the Elasticsearch process have been throttled. */ + number_of_times_throttled?: long + /** The total amount of time, in nanoseconds, for which all tasks in the same cgroup as the Elasticsearch process have been throttled. */ + time_throttled_nanos?: DurationValue<UnitNanos> +} + +export interface NodesCgroupMemory { + /** The `memory` control group to which the Elasticsearch process belongs. */ + control_group?: string + /** The maximum amount of user memory (including file cache) allowed for all tasks in the same cgroup as the Elasticsearch process. + * This value can be too big to store in a `long`, so is returned as a string so that the value returned can exactly match what the underlying operating system interface returns. + * Any value that is too large to parse into a `long` almost certainly means no limit has been set for the cgroup. */ + limit_in_bytes?: string + /** The total current memory usage by processes in the cgroup, in bytes, by all tasks in the same cgroup as the Elasticsearch process. + * This value is stored as a string for consistency with `limit_in_bytes`. */ + usage_in_bytes?: string +} + +export interface NodesClient { + /** Unique ID for the HTTP client. */ + id?: long + /** Reported agent for the HTTP client. + * If unavailable, this property is not included in the response. */ + agent?: string + /** Local address for the HTTP connection. */ + local_address?: string + /** Remote address for the HTTP connection. */ + remote_address?: string + /** The URI of the client’s most recent request. */ + last_uri?: string + /** Time at which the client opened the connection. */ + opened_time_millis?: long + /** Time at which the client closed the connection if the connection is closed. */ + closed_time_millis?: long + /** Time of the most recent request from this client. */ + last_request_time_millis?: long + /** Number of requests from this client. */ + request_count?: long + /** Cumulative size in bytes of all requests from this client. */ + request_size_bytes?: long + /** Value from the client’s `x-opaque-id` HTTP header. + * If unavailable, this property is not included in the response. */ + x_opaque_id?: string +} + +export interface NodesClusterAppliedStats { + recordings?: NodesRecording[] +}
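+ +/* Usage sketch (illustrative): checking cgroup CPU throttling on each node; assumes the `client` instance from the first sketch. + + const stats = await client.nodes.stats({ metric: 'os' }) + for (const [id, node] of Object.entries(stats.nodes)) { + console.log(id, node.os?.cgroup?.cpu?.stat?.number_of_times_throttled) + } +*/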
+ +export interface NodesClusterStateQueue { + /** Total number of cluster states in queue. */ + total?: long + /** Number of pending cluster states in queue. */ + pending?: long + /** Number of committed cluster states in queue. */ + committed?: long +} + +export interface NodesClusterStateUpdate { + /** The number of cluster state update attempts that did not change the cluster state since the node started. */ + count: long + /** The cumulative amount of time spent computing no-op cluster state updates since the node started. */ + computation_time?: Duration + /** The cumulative amount of time, in milliseconds, spent computing no-op cluster state updates since the node started. */ + computation_time_millis?: DurationValue<UnitMillis> + /** The cumulative amount of time spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. + * This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ + publication_time?: Duration + /** The cumulative amount of time, in milliseconds, spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. + * This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ + publication_time_millis?: DurationValue<UnitMillis> + /** The cumulative amount of time spent constructing a publication context since the node started for publications that ultimately succeeded. + * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ + context_construction_time?: Duration + /** The cumulative amount of time, in milliseconds, spent constructing a publication context since the node started for publications that ultimately succeeded. + * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ + context_construction_time_millis?: DurationValue<UnitMillis> + /** The cumulative amount of time spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ + commit_time?: Duration + /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ + commit_time_millis?: DurationValue<UnitMillis> + /** The cumulative amount of time spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ + completion_time?: Duration + /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ + completion_time_millis?: DurationValue<UnitMillis> + /** The cumulative amount of time spent successfully applying cluster state updates on the elected master since the node started. */ + master_apply_time?: Duration + /** The cumulative amount of time, in milliseconds, spent successfully applying cluster state updates on the elected master since the node started. */ + master_apply_time_millis?: DurationValue<UnitMillis> + /** The cumulative amount of time spent notifying listeners of a no-op cluster state update since the node started. */ + notification_time?: Duration + /** The cumulative amount of time, in milliseconds, spent notifying listeners of a no-op cluster state update since the node started. */ + notification_time_millis?: DurationValue<UnitMillis> +}
+ +export interface NodesContext { + context?: string + compilations?: long + cache_evictions?: long + compilation_limit_triggered?: long +} + +export interface NodesCpu { + percent?: integer + sys?: Duration + sys_in_millis?: DurationValue<UnitMillis> + total?: Duration + total_in_millis?: DurationValue<UnitMillis> + user?: Duration + user_in_millis?: DurationValue<UnitMillis> + load_average?: Record<string, double> +} + +export interface NodesCpuAcct { + /** The `cpuacct` control group to which the Elasticsearch process belongs. */ + control_group?: string + /** The total CPU time, in nanoseconds, consumed by all tasks in the same cgroup as the Elasticsearch process. */ + usage_nanos?: DurationValue<UnitNanos> +} + +export interface NodesDataPathStats { + /** Total amount of disk space available to this Java virtual machine on this file store. */ + available?: string + /** Total number of bytes available to this Java virtual machine on this file store. */ + available_in_bytes?: long + disk_queue?: string + disk_reads?: long + disk_read_size?: string + disk_read_size_in_bytes?: long + disk_writes?: long + disk_write_size?: string + disk_write_size_in_bytes?: long + /** Total amount of unallocated disk space in the file store. */ + free?: string + /** Total number of unallocated bytes in the file store. */ + free_in_bytes?: long + /** Mount point of the file store (for example: `/dev/sda2`). */ + mount?: string + /** Path to the file store. */ + path?: string + /** Total size of the file store. */ + total?: string + /** Total size of the file store in bytes. */ + total_in_bytes?: long + /** Type of the file store (for example: `ext4`). */ + type?: string +} + +export interface NodesDiscovery { + /** Contains statistics for the cluster state queue of the node. */ + cluster_state_queue?: NodesClusterStateQueue + /** Contains statistics for the published cluster states of the node. */ + published_cluster_states?: NodesPublishedClusterStates + /** Contains low-level statistics about how long various activities took during cluster state updates while the node was the elected master. + * Omitted if the node is not master-eligible. + * Every field whose name ends in `_time` within this object is also represented as a raw number of milliseconds in a field whose name ends in `_time_millis`. + * The human-readable fields with a `_time` suffix are only returned if requested with the `?human=true` query parameter. */ + cluster_state_update?: Record<string, NodesClusterStateUpdate> + serialized_cluster_states?: NodesSerializedClusterState + cluster_applier_stats?: NodesClusterAppliedStats +}
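+ +/* Usage sketch (illustrative): reading cluster-state publication stats; assumes the `client` instance from the first sketch. + + const stats = await client.nodes.stats({ metric: 'discovery' }) + for (const [id, node] of Object.entries(stats.nodes)) { + console.log(id, node.discovery?.published_cluster_states?.full_states) + } +*/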
+ +export interface NodesExtendedMemoryStats extends NodesMemoryStats { + /** Percentage of free memory. */ + free_percent?: integer + /** Percentage of used memory. */ + used_percent?: integer +} + +export interface NodesFileSystem { + /** List of all file stores. */ + data?: NodesDataPathStats[] + /** Last time the file stores statistics were refreshed. + * Recorded in milliseconds since the Unix Epoch. */ + timestamp?: long + /** Contains statistics for all file stores of the node. */ + total?: NodesFileSystemTotal + /** Contains I/O statistics for the node. */ + io_stats?: NodesIoStats +} + +export interface NodesFileSystemTotal { + /** Total disk space available to this Java virtual machine on all file stores. + * Depending on OS or process level restrictions, this might appear less than `free`. + * This is the actual amount of free disk space the Elasticsearch node can utilise. */ + available?: string + /** Total number of bytes available to this Java virtual machine on all file stores. + * Depending on OS or process level restrictions, this might appear less than `free_in_bytes`. + * This is the actual amount of free disk space the Elasticsearch node can utilise. */ + available_in_bytes?: long + /** Total unallocated disk space in all file stores. */ + free?: string + /** Total number of unallocated bytes in all file stores. */ + free_in_bytes?: long + /** Total size of all file stores. */ + total?: string + /** Total size of all file stores in bytes. */ + total_in_bytes?: long +}
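+ +/* Usage sketch (illustrative): checking free disk space per node; assumes the `client` instance from the first sketch. + + const stats = await client.nodes.stats({ metric: 'fs' }) + for (const [id, node] of Object.entries(stats.nodes)) { + console.log(id, node.fs?.total?.available_in_bytes) + } +*/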
+ +export interface NodesGarbageCollector { + /** Contains statistics about JVM garbage collectors for the node. */ + collectors?: Record<string, NodesGarbageCollectorTotal> +} + +export interface NodesGarbageCollectorTotal { + /** Total number of JVM garbage collectors that collect objects. */ + collection_count?: long + /** Total time spent by JVM collecting objects. */ + collection_time?: string + /** Total time, in milliseconds, spent by JVM collecting objects. */ + collection_time_in_millis?: long +} + +export interface NodesHttp { + /** Current number of open HTTP connections for the node. */ + current_open?: integer + /** Total number of HTTP connections opened for the node. */ + total_opened?: long + /** Information on current and recently-closed HTTP client connections. + * Clients that have been closed longer than the `http.client_stats.closed_channels.max_age` setting will not be represented here. */ + clients?: NodesClient[] + /** Detailed HTTP stats broken down by route + * @remarks This property is not supported on Elastic Cloud Serverless. */ + routes: Record<string, NodesHttpRoute> +} + +export interface NodesHttpRoute { + requests: NodesHttpRouteRequests + responses: NodesHttpRouteResponses +} + +export interface NodesHttpRouteRequests { + count: long + total_size_in_bytes: long + size_histogram: NodesSizeHttpHistogram[] +} + +export interface NodesHttpRouteResponses { + count: long + total_size_in_bytes: long + handling_time_histogram: NodesTimeHttpHistogram[] + size_histogram: NodesSizeHttpHistogram[] +} + +export interface NodesIndexingPressure { + /** Contains statistics for memory consumption from indexing load. */ + memory?: NodesIndexingPressureMemory +} + +export interface NodesIndexingPressureMemory { + /** Configured memory limit for the indexing requests. + * Replica requests have an automatic limit that is 1.5x this value. */ + limit?: ByteSize + /** Configured memory limit, in bytes, for the indexing requests. + * Replica requests have an automatic limit that is 1.5x this value. */ + limit_in_bytes?: long + /** Contains statistics for current indexing load. */ + current?: NodesPressureMemory + /** Contains statistics for the cumulative indexing load since the node started. */ + total?: NodesPressureMemory +}
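+ +/* Usage sketch (illustrative): watching current indexing-pressure memory; assumes the `client` instance from the first sketch. + + const stats = await client.nodes.stats({ metric: 'indexing_pressure' }) + for (const [id, node] of Object.entries(stats.nodes)) { + console.log(id, node.indexing_pressure?.memory?.current?.combined_coordinating_and_primary_in_bytes) + } +*/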
+ +export interface NodesIngest { + /** Contains statistics about ingest pipelines for the node. */ + pipelines?: Record<string, NodesIngestStats> + /** Contains statistics about ingest operations for the node. */ + total?: NodesIngestTotal +} + +export interface NodesIngestStats { + /** Total number of documents ingested during the lifetime of this node. */ + count: long + /** Total number of documents currently being ingested. */ + current: long + /** Total number of failed ingest operations during the lifetime of this node. */ + failed: long + /** Total number of ingest processors. */ + processors: Record<string, NodesKeyedProcessor>[] + /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ + time_in_millis: DurationValue<UnitMillis> + /** Total number of bytes of all documents ingested by the pipeline. + * This field is only present on pipelines which are the first to process a document. + * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. */ + ingested_as_first_pipeline_in_bytes: long + /** Total number of bytes of all documents produced by the pipeline. + * This field is only present on pipelines which are the first to process a document. + * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. + * In situations where there are subsequent pipelines, the value represents the size of the document after all pipelines have run. */ + produced_as_first_pipeline_in_bytes: long +} + +export interface NodesIngestTotal { + /** Total number of documents ingested during the lifetime of this node. */ + count: long + /** Total number of documents currently being ingested. */ + current: long + /** Total number of failed ingest operations during the lifetime of this node. */ + failed: long + /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ + time_in_millis: DurationValue<UnitMillis> +} + +export interface NodesIoStatDevice { + /** The Linux device name. */ + device_name?: string + /** The total number of read and write operations for the device completed since starting Elasticsearch. */ + operations?: long + /** The total number of kilobytes read for the device since starting Elasticsearch. */ + read_kilobytes?: long + /** The total number of read operations for the device completed since starting Elasticsearch. */ + read_operations?: long + /** The total number of kilobytes written for the device since starting Elasticsearch. */ + write_kilobytes?: long + /** The total number of write operations for the device completed since starting Elasticsearch. */ + write_operations?: long +} + +export interface NodesIoStats { + /** Array of disk metrics for each device that is backing an Elasticsearch data path. + * These disk metrics are probed periodically and averages between the last probe and the current probe are computed. */ + devices?: NodesIoStatDevice[] + /** The sum of the disk metrics for all devices that back an Elasticsearch data path. */ + total?: NodesIoStatDevice +} + +export interface NodesJvm { + /** Contains statistics about JVM buffer pools for the node. */ + buffer_pools?: Record<string, NodesNodeBufferPool> + /** Contains statistics about classes loaded by JVM for the node. */ + classes?: NodesJvmClasses + /** Contains statistics about JVM garbage collectors for the node. */ + gc?: NodesGarbageCollector + /** Contains JVM memory usage statistics for the node. */ + mem?: NodesJvmMemoryStats + /** Contains statistics about JVM thread usage for the node. */ + threads?: NodesJvmThreads + /** Last time JVM statistics were refreshed. */ + timestamp?: long + /** Human-readable JVM uptime. + * Only returned if the `human` query parameter is `true`. */ + uptime?: string + /** JVM uptime in milliseconds. */ + uptime_in_millis?: long +} + +export interface NodesJvmClasses { + /** Number of classes currently loaded by JVM. */ + current_loaded_count?: long + /** Total number of classes loaded since the JVM started. */ + total_loaded_count?: long + /** Total number of classes unloaded since the JVM started. */ + total_unloaded_count?: long +} + +export interface NodesJvmMemoryStats { + /** Memory, in bytes, currently in use by the heap. */ + heap_used_in_bytes?: long + /** Percentage of memory currently in use by the heap. */ + heap_used_percent?: long + /** Amount of memory, in bytes, available for use by the heap. */ + heap_committed_in_bytes?: long + /** Maximum amount of memory, in bytes, available for use by the heap. */ + heap_max_in_bytes?: long + /** Maximum amount of memory, available for use by the heap. */ + heap_max?: ByteSize + /** Non-heap memory used, in bytes. */ + non_heap_used_in_bytes?: long + /** Amount of non-heap memory available, in bytes. */ + non_heap_committed_in_bytes?: long + /** Contains statistics about heap memory usage for the node. */ + pools?: Record<string, NodesPool> +} + +export interface NodesJvmThreads { + /** Number of active threads in use by JVM. */ + count?: long + /** Highest number of threads used by JVM. */ + peak_count?: long +}
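+ +/* Usage sketch (illustrative): reading JVM heap usage per node; assumes the `client` instance from the first sketch. + + const stats = await client.nodes.stats({ metric: 'jvm' }) + for (const [id, node] of Object.entries(stats.nodes)) { + console.log(id, node.jvm?.mem?.heap_used_percent) + } +*/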
+ +export interface NodesKeyedProcessor { + stats?: NodesProcessor + type?: string +} + +export interface NodesMemoryStats { + /** If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property then this reports the overridden value in bytes. + * Otherwise it reports the same value as `total_in_bytes`. */ + adjusted_total_in_bytes?: long + resident?: string + resident_in_bytes?: long + share?: string + share_in_bytes?: long + total_virtual?: string + total_virtual_in_bytes?: long + /** Total amount of physical memory in bytes. */ + total_in_bytes?: long + /** Amount of free physical memory in bytes. */ + free_in_bytes?: long + /** Amount of used physical memory in bytes. */ + used_in_bytes?: long +} + +export interface NodesNodeBufferPool { + /** Number of buffer pools. */ + count?: long + /** Total capacity of buffer pools. */ + total_capacity?: string + /** Total capacity of buffer pools in bytes. */ + total_capacity_in_bytes?: long + /** Size of buffer pools. */ + used?: string + /** Size of buffer pools in bytes. */ + used_in_bytes?: long +} + +export interface NodesNodeReloadResult { + name: Name + reload_exception?: ErrorCause +} + +export interface NodesNodesResponseBase { + /** Contains statistics about the number of nodes selected by the request’s node filters. */ + _nodes?: NodeStatistics +} + +export interface NodesOperatingSystem { + cpu?: NodesCpu + mem?: NodesExtendedMemoryStats + swap?: NodesMemoryStats + cgroup?: NodesCgroup + timestamp?: long +} + +export interface NodesPool { + /** Memory, in bytes, used by the heap. */ + used_in_bytes?: long + /** Maximum amount of memory, in bytes, available for use by the heap. */ + max_in_bytes?: long + /** Largest amount of memory, in bytes, historically used by the heap. */ + peak_used_in_bytes?: long + /** Largest amount of memory, in bytes, historically used by the heap. */ + peak_max_in_bytes?: long +} + +export interface NodesPressureMemory { + /** Memory consumed by indexing requests in the coordinating, primary, or replica stage. */ + all?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating, primary, or replica stage. */ + all_in_bytes?: long + /** Memory consumed by indexing requests in the coordinating or primary stage. + * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ + combined_coordinating_and_primary?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating or primary stage. + * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ + combined_coordinating_and_primary_in_bytes?: long + /** Memory consumed by indexing requests in the coordinating stage. */ + coordinating?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating stage. */ + coordinating_in_bytes?: long + /** Memory consumed by indexing requests in the primary stage. */ + primary?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the primary stage. */ + primary_in_bytes?: long + /** Memory consumed by indexing requests in the replica stage. */ + replica?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the replica stage. */ + replica_in_bytes?: long + /** Number of indexing requests rejected in the coordinating stage. */ + coordinating_rejections?: long + /** Number of indexing requests rejected in the primary stage. */ + primary_rejections?: long + /** Number of indexing requests rejected in the replica stage. */ + replica_rejections?: long + primary_document_rejections?: long + large_operation_rejections?: long +} + +export interface NodesProcess { + /** Contains CPU statistics for the node. */ + cpu?: NodesCpu + /** Contains virtual memory statistics for the node. */ + mem?: NodesMemoryStats + /** Number of opened file descriptors associated with the current process, or `-1` if not supported. */ + open_file_descriptors?: integer + /** Maximum number of file descriptors allowed on the system, or `-1` if not supported. */ + max_file_descriptors?: integer + /** Last time the statistics were refreshed. + * Recorded in milliseconds since the Unix Epoch. */ + timestamp?: long +}
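+ +/* Usage sketch (illustrative): checking open file descriptors per node; assumes the `client` instance from the first sketch. + + const stats = await client.nodes.stats({ metric: 'process' }) + for (const [id, node] of Object.entries(stats.nodes)) { + console.log(id, node.process?.open_file_descriptors, '/', node.process?.max_file_descriptors) + } +*/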
+ +export interface NodesProcessor { + /** Number of documents transformed by the processor. */ + count?: long + /** Number of documents currently being transformed by the processor. */ + current?: long + /** Number of failed operations for the processor. */ + failed?: long + /** Time, in milliseconds, spent by the processor transforming documents. */ + time_in_millis?: DurationValue<UnitMillis> +} + +export interface NodesPublishedClusterStates { + /** Number of published cluster states. */ + full_states?: long + /** Number of incompatible differences between published cluster states. */ + incompatible_diffs?: long + /** Number of compatible differences between published cluster states. */ + compatible_diffs?: long +} + +export interface NodesRecording { + name?: string + cumulative_execution_count?: long + cumulative_execution_time?: Duration + cumulative_execution_time_millis?: DurationValue<UnitMillis> +} + +export interface NodesRepositoryLocation { + base_path: string + /** Container name (Azure) */ + container?: string + /** Bucket name (GCP, S3) */ + bucket?: string +} + +export interface NodesRepositoryMeteringInformation { + /** Repository name. */ + repository_name: Name + /** Repository type. */ + repository_type: string + /** Represents a unique location within the repository. */ + repository_location: NodesRepositoryLocation + /** An identifier that changes every time the repository is updated. */ + repository_ephemeral_id: Id + /** Time the repository was created or updated. Recorded in milliseconds since the Unix Epoch. */ + repository_started_at: EpochTime<UnitMillis> + /** Time the repository was deleted or updated. Recorded in milliseconds since the Unix Epoch. */ + repository_stopped_at?: EpochTime<UnitMillis> + /** A flag that tells whether or not this object has been archived. When a repository is closed or updated the + * repository metering information is archived and kept for a certain period of time. This allows retrieving the + * repository metering information of previous repository instantiations. */ + archived: boolean + /** The cluster state version when this object was archived; this field can be used as a logical timestamp to delete + * all the archived metrics up to an observed version. This field is only present for archived repository metering + * information objects. The main purpose of this field is to avoid possible race conditions during repository metering + * information deletions, i.e. deleting archived repositories metering information that we haven’t observed yet. */ + cluster_version?: VersionNumber + /** An object with the number of requests performed against the repository grouped by request type. */ + request_counts: NodesRequestCounts +} + +export interface NodesRequestCounts { + /** Number of Get Blob Properties requests (Azure) */ + GetBlobProperties?: long + /** Number of Get Blob requests (Azure) */ + GetBlob?: long + /** Number of List Blobs requests (Azure) */ + ListBlobs?: long + /** Number of Put Blob requests (Azure) */ + PutBlob?: long + /** Number of Put Block requests (Azure) */ + PutBlock?: long + /** Number of Put Block List requests */ + PutBlockList?: long + /** Number of get object requests (GCP, S3) */ + GetObject?: long + /** Number of list objects requests (GCP, S3) */ + ListObjects?: long + /** Number of insert object requests, including simple, multipart and resumable uploads. Resumable uploads + * can perform multiple http requests to insert a single object but they are considered as a single request + * since they are billed as an individual operation. (GCP) */ + InsertObject?: long + /** Number of PutObject requests (S3) */ + PutObject?: long + /** Number of Multipart requests, including CreateMultipartUpload, UploadPart and CompleteMultipartUpload requests (S3) */ + PutMultipartObject?: long +}
+ +export interface NodesScriptCache { + /** Total number of times the script cache has evicted old data. */ + cache_evictions?: long + /** Total number of times the script compilation circuit breaker has limited inline script compilations. */ + compilation_limit_triggered?: long + /** Total number of inline script compilations performed by the node. */ + compilations?: long + context?: string +} + +export interface NodesScripting { + /** Total number of times the script cache has evicted old data. */ + cache_evictions?: long + /** Total number of inline script compilations performed by the node. */ + compilations?: long + /** Contains the recent history of script compilations. */ + compilations_history?: Record<string, long> + /** Total number of times the script compilation circuit breaker has limited inline script compilations. */ + compilation_limit_triggered?: long + contexts?: NodesContext[] +} + +export interface NodesSerializedClusterState { + /** Number of published cluster states. */ + full_states?: NodesSerializedClusterStateDetail + diffs?: NodesSerializedClusterStateDetail +} + +export interface NodesSerializedClusterStateDetail { + count?: long + uncompressed_size?: string + uncompressed_size_in_bytes?: long + compressed_size?: string + compressed_size_in_bytes?: long +} + +export interface NodesSizeHttpHistogram { + count: long + ge_bytes?: long + lt_bytes?: long +} + +export interface NodesStats { + /** Statistics about adaptive replica selection. */ + adaptive_selection?: Record<string, NodesAdaptiveSelection> + /** Statistics about the field data circuit breaker. */ + breakers?: Record<string, NodesBreaker> + /** File system information, data path, free disk space, read/write stats. */ + fs?: NodesFileSystem + /** Network host for the node, based on the network host setting. */ + host?: Host + /** HTTP connection information. */ + http?: NodesHttp + /** Statistics about ingest preprocessing. */ + ingest?: NodesIngest + /** IP address and port for the node. */ + ip?: Ip | Ip[] + /** JVM stats, memory pool information, garbage collection, buffer pools, number of loaded/unloaded classes. */ + jvm?: NodesJvm + /** Human-readable identifier for the node. + * Based on the node name setting. */ + name?: Name + /** Operating system stats, load average, mem, swap. */ + os?: NodesOperatingSystem + /** Process statistics, memory consumption, cpu usage, open file descriptors. */ + process?: NodesProcess + /** Roles assigned to the node. */ + roles?: NodeRoles + /** Contains script statistics for the node. */ + script?: NodesScripting + script_cache?: Record<string, NodesScriptCache | NodesScriptCache[]> + /** Statistics about each thread pool, including current size, queue and rejected tasks. */ + thread_pool?: Record<string, NodesThreadCount> + timestamp?: long + /** Transport statistics about sent and received bytes in cluster communication. */ + transport?: NodesTransport + /** Host and port for the transport layer, used for internal communication between nodes in a cluster. */ + transport_address?: TransportAddress + /** Contains a list of attributes for the node. */ + attributes?: Record<string, string> + /** Contains node discovery statistics for the node. */ + discovery?: NodesDiscovery + /** Contains indexing pressure statistics for the node. */ + indexing_pressure?: NodesIndexingPressure + /** Indices stats about size, document count, indexing and deletion times, search times, field cache size, merges and flushes. */ + indices?: IndicesStatsShardStats +} + +export interface NodesThreadCount { + /** Number of active threads in the thread pool. */ + active?: long + /** Number of tasks completed by the thread pool executor. */ + completed?: long + /** Highest number of active threads in the thread pool. */ + largest?: long + /** Number of tasks in queue for the thread pool. */ + queue?: long + /** Number of tasks rejected by the thread pool executor. */ + rejected?: long + /** Number of threads in the thread pool. */ + threads?: long +}
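+ +/* Usage sketch (illustrative): spotting thread-pool rejections; assumes the `client` instance from the first sketch. + + const stats = await client.nodes.stats({ metric: 'thread_pool' }) + for (const [id, node] of Object.entries(stats.nodes)) { + console.log(id, node.thread_pool?.search?.rejected) + } +*/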
+ +export interface NodesTimeHttpHistogram { + count: long + ge_millis?: long + lt_millis?: long +} + +export interface NodesTransport { + /** The distribution of the time spent handling each inbound message on a transport thread, represented as a histogram. */ + inbound_handling_time_histogram?: NodesTransportHistogram[] + /** The distribution of the time spent sending each outbound transport message on a transport thread, represented as a histogram. */ + outbound_handling_time_histogram?: NodesTransportHistogram[] + /** Total number of RX (receive) packets received by the node during internal cluster communication. */ + rx_count?: long + /** Size of RX packets received by the node during internal cluster communication. */ + rx_size?: string + /** Size, in bytes, of RX packets received by the node during internal cluster communication. */ + rx_size_in_bytes?: long + /** Current number of inbound TCP connections used for internal communication between nodes. */ + server_open?: integer + /** Total number of TX (transmit) packets sent by the node during internal cluster communication. */ + tx_count?: long + /** Size of TX packets sent by the node during internal cluster communication. */ + tx_size?: string + /** Size, in bytes, of TX packets sent by the node during internal cluster communication. */ + tx_size_in_bytes?: long + /** The cumulative number of outbound transport connections that this node has opened since it started. + * Each transport connection may comprise multiple TCP connections but is only counted once in this statistic. + * Transport connections are typically long-lived so this statistic should remain constant in a stable cluster. */ + total_outbound_connections?: long +} + +export interface NodesTransportHistogram { + /** The number of times a transport thread took a period of time within the bounds of this bucket to handle an inbound message. */ + count?: long + /** The exclusive upper bound of the bucket in milliseconds. + * May be omitted on the last bucket if this bucket has no upper bound. */ + lt_millis?: long + /** The inclusive lower bound of the bucket in milliseconds. May be omitted on the first bucket if this bucket has no lower bound. */ + ge_millis?: long +} + +export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase { + /** Comma-separated list of node IDs or names used to limit returned information. */ + node_id: NodeIds + /** Specifies the maximum `archive_version` to be cleared from the archive. */ + max_archive_version: long + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, max_archive_version?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, max_archive_version?: never } +} + +export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase + +export interface NodesClearRepositoriesMeteringArchiveResponseBase extends NodesNodesResponseBase { + /** Name of the cluster. Based on the `cluster.name` setting. */ + cluster_name: Name + /** Contains repositories metering information for the nodes selected by the request. */ + nodes: Record<string, NodesRepositoryMeteringInformation> +} + +export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { + /** Comma-separated list of node IDs or names used to limit returned information. */ + node_id: NodeIds + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never } +} + +export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase + +export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodesResponseBase { + /** Name of the cluster. Based on the `cluster.name` setting. */ + cluster_name: Name + /** Contains repositories metering information for the nodes selected by the request. */ + nodes: Record<string, NodesRepositoryMeteringInformation> +}
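+ +/* Usage sketch (illustrative): fetching repositories metering information; assumes the `client` instance from the first sketch. + + const resp = await client.nodes.getRepositoriesMeteringInfo({ node_id: '_all' }) + for (const [id, info] of Object.entries(resp.nodes)) { + console.log(id, info.repository_name, info.request_counts) + } +*/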
+ +export interface NodesHotThreadsRequest extends RequestBase { + /** List of node IDs or names used to limit returned information. */ + node_id?: NodeIds + /** If true, known idle threads (e.g. waiting in a socket select, or to get + * a task from an empty queue) are filtered out. */ + ignore_idle_threads?: boolean + /** The interval to do the second sampling of threads. */ + interval?: Duration + /** Number of samples of thread stacktrace. */ + snapshots?: long + /** Specifies the number of hot threads to provide information for. */ + threads?: long + /** Period to wait for a response. If no response is received + * before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The type to sample. */ + type?: ThreadType + /** The sort order for 'cpu' type (default: total) */ + sort?: ThreadType + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, ignore_idle_threads?: never, interval?: never, snapshots?: never, threads?: never, timeout?: never, type?: never, sort?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, ignore_idle_threads?: never, interval?: never, snapshots?: never, threads?: never, timeout?: never, type?: never, sort?: never } +} + +export interface NodesHotThreadsResponse { +} + +export interface NodesInfoDeprecationIndexing { + enabled: boolean | string +}
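+ +/* Usage sketch (illustrative): sampling hot threads; the API returns a plain-text report, so the sketch just prints the response; assumes the `client` instance from the first sketch. + + const resp = await client.nodes.hotThreads({ threads: 3, interval: '500ms' }) + console.log(resp) +*/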
+ +export interface NodesInfoNodeInfo { + attributes: Record<string, string> + build_flavor: string + /** Short hash of the last git commit in this release. */ + build_hash: string + build_type: string + component_versions: Record<Name, VersionNumber> + /** The node’s host name. */ + host: Host + http?: NodesInfoNodeInfoHttp + index_version: VersionNumber + /** The node’s IP address. */ + ip: Ip + jvm?: NodesInfoNodeJvmInfo + /** The node's name */ + name: Name + os?: NodesInfoNodeOperatingSystemInfo + plugins?: PluginStats[] + process?: NodesInfoNodeProcessInfo + roles: NodeRoles + settings?: NodesInfoNodeInfoSettings + thread_pool?: Record<string, NodesInfoNodeThreadPoolInfo> + /** Total heap allowed to be used to hold recently indexed documents before they must be written to disk. This size is a shared pool across all shards on this node, and is controlled by Indexing Buffer settings. */ + total_indexing_buffer?: long + /** Same as total_indexing_buffer, but expressed in bytes. */ + total_indexing_buffer_in_bytes?: ByteSize + transport?: NodesInfoNodeInfoTransport + /** Host and port where transport HTTP connections are accepted. */ + transport_address: TransportAddress + transport_version: VersionNumber + /** Elasticsearch version running on this node. */ + version: VersionString + modules?: PluginStats[] + ingest?: NodesInfoNodeInfoIngest + aggregations?: Record<string, NodesInfoNodeInfoAggregation> + remote_cluster_server?: NodesInfoRemoveClusterServer +}
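+ +/* Usage sketch (illustrative): listing the Elasticsearch version and roles on each node; assumes the `client` instance from the first sketch. + + const info = await client.nodes.info() + for (const [id, node] of Object.entries(info.nodes)) { + console.log(id, node.version, node.roles) + } +*/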
string + port?: integer | string +} + +export interface NodesInfoNodeInfoSettingsHttpType { + default: string +} + +export interface NodesInfoNodeInfoSettingsIngest { + attachment?: NodesInfoNodeInfoIngestInfo + append?: NodesInfoNodeInfoIngestInfo + csv?: NodesInfoNodeInfoIngestInfo + convert?: NodesInfoNodeInfoIngestInfo + date?: NodesInfoNodeInfoIngestInfo + date_index_name?: NodesInfoNodeInfoIngestInfo + dot_expander?: NodesInfoNodeInfoIngestInfo + enrich?: NodesInfoNodeInfoIngestInfo + fail?: NodesInfoNodeInfoIngestInfo + foreach?: NodesInfoNodeInfoIngestInfo + json?: NodesInfoNodeInfoIngestInfo + user_agent?: NodesInfoNodeInfoIngestInfo + kv?: NodesInfoNodeInfoIngestInfo + geoip?: NodesInfoNodeInfoIngestInfo + grok?: NodesInfoNodeInfoIngestInfo + gsub?: NodesInfoNodeInfoIngestInfo + join?: NodesInfoNodeInfoIngestInfo + lowercase?: NodesInfoNodeInfoIngestInfo + remove?: NodesInfoNodeInfoIngestInfo + rename?: NodesInfoNodeInfoIngestInfo + script?: NodesInfoNodeInfoIngestInfo + set?: NodesInfoNodeInfoIngestInfo + sort?: NodesInfoNodeInfoIngestInfo + split?: NodesInfoNodeInfoIngestInfo + trim?: NodesInfoNodeInfoIngestInfo + uppercase?: NodesInfoNodeInfoIngestInfo + urldecode?: NodesInfoNodeInfoIngestInfo + bytes?: NodesInfoNodeInfoIngestInfo + dissect?: NodesInfoNodeInfoIngestInfo + set_security_user?: NodesInfoNodeInfoIngestInfo + pipeline?: NodesInfoNodeInfoIngestInfo + drop?: NodesInfoNodeInfoIngestInfo + circle?: NodesInfoNodeInfoIngestInfo + inference?: NodesInfoNodeInfoIngestInfo +} + +export interface NodesInfoNodeInfoSettingsNetwork { + host?: Host | Host[] +} + +export interface NodesInfoNodeInfoSettingsNode { + name: Name + attr: Record + max_local_storage_nodes?: string +} + +export interface NodesInfoNodeInfoSettingsTransport { + type: NodesInfoNodeInfoSettingsTransportType | string + 'type.default'?: string + features?: NodesInfoNodeInfoSettingsTransportFeatures + /** Only used in unit tests */ + ignore_deserialization_errors?: SpecUtilsStringified +} + +export interface NodesInfoNodeInfoSettingsTransportFeatures { + 'x-pack': string +} + +export interface NodesInfoNodeInfoSettingsTransportType { + default: string +} + +export interface NodesInfoNodeInfoTransport { + bound_address: string[] + publish_address: string + profiles: Record +} + +export interface NodesInfoNodeInfoXpack { + license?: NodesInfoNodeInfoXpackLicense + security: NodesInfoNodeInfoXpackSecurity + notification?: Record + ml?: NodesInfoNodeInfoXpackMl +} + +export interface NodesInfoNodeInfoXpackLicense { + self_generated: NodesInfoNodeInfoXpackLicenseType +} + +export interface NodesInfoNodeInfoXpackLicenseType { + type: string +} + +export interface NodesInfoNodeInfoXpackMl { + use_auto_machine_memory_percent?: boolean +} + +export interface NodesInfoNodeInfoXpackSecurity { + http?: NodesInfoNodeInfoXpackSecuritySsl + enabled: string + transport?: NodesInfoNodeInfoXpackSecuritySsl + authc?: NodesInfoNodeInfoXpackSecurityAuthc +} + +export interface NodesInfoNodeInfoXpackSecurityAuthc { + realms?: NodesInfoNodeInfoXpackSecurityAuthcRealms + token?: NodesInfoNodeInfoXpackSecurityAuthcToken +} + +export interface NodesInfoNodeInfoXpackSecurityAuthcRealms { + file?: Record + native?: Record + pki?: Record +} + +export interface NodesInfoNodeInfoXpackSecurityAuthcRealmsStatus { + enabled?: string + order: string +} + +export interface NodesInfoNodeInfoXpackSecurityAuthcToken { + enabled: string +} + +export interface NodesInfoNodeInfoXpackSecuritySsl { + ssl: Record +} + +export interface NodesInfoNodeJvmInfo 
{ + gc_collectors: string[] + mem: NodesInfoNodeInfoJvmMemory + memory_pools: string[] + pid: integer + start_time_in_millis: EpochTime + version: VersionString + vm_name: Name + vm_vendor: string + vm_version: VersionString + using_bundled_jdk: boolean + using_compressed_ordinary_object_pointers?: boolean | string + input_arguments: string[] +} + +export interface NodesInfoNodeOperatingSystemInfo { + /** Name of the JVM architecture (ex: amd64, x86) */ + arch: string + /** Number of processors available to the Java virtual machine */ + available_processors: integer + /** The number of processors actually used to calculate thread pool size. This number can be set with the node.processors setting of a node and defaults to the number of processors reported by the OS. */ + allocated_processors?: integer + /** Name of the operating system (ex: Linux, Windows, Mac OS X) */ + name: Name + pretty_name: Name + /** Refresh interval for the OS statistics */ + refresh_interval_in_millis: DurationValue + /** Version of the operating system */ + version: VersionString + cpu?: NodesInfoNodeInfoOSCPU + mem?: NodesInfoNodeInfoMemory + swap?: NodesInfoNodeInfoMemory +} + +export interface NodesInfoNodeProcessInfo { + /** Process identifier (PID) */ + id: long + /** Indicates if the process address space has been successfully locked in memory */ + mlockall: boolean + /** Refresh interval for the process statistics */ + refresh_interval_in_millis: DurationValue +} + +export interface NodesInfoNodeThreadPoolInfo { + core?: integer + keep_alive?: Duration + max?: integer + queue_size: integer + size?: integer + type: string +} + +export type NodesInfoNodesInfoMetric = '_all' | '_none' | 'settings' | 'os' | 'process' | 'jvm' | 'thread_pool' | 'transport' | 'http' | 'remote_cluster_server' | 'plugins' | 'ingest' | 'aggregations' | 'indices' + +export type NodesInfoNodesInfoMetrics = NodesInfoNodesInfoMetric | NodesInfoNodesInfoMetric[] + +export interface NodesInfoRemoveClusterServer { + bound_address: TransportAddress[] + publish_address: TransportAddress +} + +export interface NodesInfoRequest extends RequestBase { + /** Comma-separated list of node IDs or names used to limit returned information. */ + node_id?: NodeIds + /** Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. */ + metric?: NodesInfoNodesInfoMetrics + /** If true, returns settings in flat format. */ + flat_settings?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, metric?: never, flat_settings?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, metric?: never, flat_settings?: never, timeout?: never } +} + +export type NodesInfoResponse = NodesInfoResponseBase + +export interface NodesInfoResponseBase extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record +} + +export interface NodesReloadSecureSettingsRequest extends RequestBase { + /** The names of particular nodes in the cluster to target. */ + node_id?: NodeIds + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The password for the Elasticsearch keystore. 
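+ * + * A hedged usage sketch, not part of the generated spec (the password value is invented and `client` is assumed to be an instantiated `@elastic/elasticsearch` Client): + * + * await client.nodes.reloadSecureSettings({ secure_settings_password: 'keystore-pass' }) + *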
*/ + secure_settings_password?: Password + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, timeout?: never, secure_settings_password?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, timeout?: never, secure_settings_password?: never } +} + +export type NodesReloadSecureSettingsResponse = NodesReloadSecureSettingsResponseBase + +export interface NodesReloadSecureSettingsResponseBase extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record +} + +export type NodesStatsNodeStatsMetric = '_all' | '_none' | 'indices' | 'os' | 'process' | 'jvm' | 'thread_pool' | 'fs' | 'transport' | 'http' | 'breaker' | 'script' | 'discovery' | 'ingest' | 'adaptive_selection' | 'script_cache' | 'indexing_pressure' | 'repositories' | 'allocations' + +export type NodesStatsNodeStatsMetrics = NodesStatsNodeStatsMetric | NodesStatsNodeStatsMetric[] + +export interface NodesStatsRequest extends RequestBase { + /** Comma-separated list of node IDs or names used to limit returned information. */ + node_id?: NodeIds + /** Limit the information returned to the specified metrics */ + metric?: NodesStatsNodeStatsMetrics + /** Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. */ + index_metric?: CommonStatsFlags + /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */ + completion_fields?: Fields + /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */ + fielddata_fields?: Fields + /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ + fields?: Fields + /** Comma-separated list of search groups to include in the search statistics. */ + groups?: boolean + /** If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). */ + include_segment_file_sizes?: boolean + /** Indicates whether statistics are aggregated at the node, indices, or shards level. */ + level?: NodeStatsLevel + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** A comma-separated list of document types for the indexing index metric. */ + types?: string[] + /** If `true`, the response includes information from segments that are not loaded into memory. */ + include_unloaded_segments?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, metric?: never, index_metric?: never, completion_fields?: never, fielddata_fields?: never, fields?: never, groups?: never, include_segment_file_sizes?: never, level?: never, timeout?: never, types?: never, include_unloaded_segments?: never } + /** All values in `querystring` will be added to the request querystring. 
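+ * + * A hedged sketch of a typical stats call (the metric values are illustrative; `client` is an assumed Client instance): + * + * const stats = await client.nodes.stats({ metric: ['jvm', 'fs'] }) + *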
*/ + querystring?: { [key: string]: any } & { node_id?: never, metric?: never, index_metric?: never, completion_fields?: never, fielddata_fields?: never, fields?: never, groups?: never, include_segment_file_sizes?: never, level?: never, timeout?: never, types?: never, include_unloaded_segments?: never } +} + +export type NodesStatsResponse = NodesStatsResponseBase + +export interface NodesStatsResponseBase extends NodesNodesResponseBase { + cluster_name?: Name + nodes: Record +} + +export interface NodesUsageNodeUsage { + rest_actions: Record + since: EpochTime + timestamp: EpochTime + aggregations: Record +} + +export type NodesUsageNodesUsageMetric = '_all' | 'rest_actions' | 'aggregations' + +export type NodesUsageNodesUsageMetrics = NodesUsageNodesUsageMetric | NodesUsageNodesUsageMetric[] + +export interface NodesUsageRequest extends RequestBase { + /** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes */ + node_id?: NodeIds + /** Limits the information returned to the specific metrics. + * A comma-separated list of the following options: `_all`, `rest_actions`, `aggregations`. */ + metric?: NodesUsageNodesUsageMetrics + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, metric?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, metric?: never, timeout?: never } +} + +export type NodesUsageResponse = NodesUsageResponseBase + +export interface NodesUsageResponseBase extends NodesNodesResponseBase { + cluster_name: Name + nodes: Record +} + +export interface ProjectTagsProjectTags { + origin: Partial> + linked_projects?: Record +} + +export interface ProjectTagsRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export type ProjectTagsResponse = ProjectTagsProjectTags + +export interface ProjectTagsTagsKeys { + _id: string + _alias: string + _type: string + _organisation: string +} +export type ProjectTagsTags = ProjectTagsTagsKeys +& { [property: string]: string } + +export interface QueryRulesQueryRule { + /** A unique identifier for the rule. */ + rule_id: Id + /** The type of rule. + * `pinned` will identify and pin specific documents to the top of search results. + * `exclude` will exclude specific documents from search results. */ + type: QueryRulesQueryRuleType + /** The criteria that must be met for the rule to be applied. + * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ + criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + /** The actions to take when the rule is matched. + * The format of this action depends on the rule type. */ + actions: QueryRulesQueryRuleActions + priority?: integer +} + +export interface QueryRulesQueryRuleActions { + /** The unique document IDs of the documents to apply the rule to. + * Only one of `ids` or `docs` may be specified and at least one must be specified. 
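+ * For example (a hedged illustration; the document IDs are invented): the actions of a `pinned` rule could be `{ ids: ['doc-1', 'doc-2'] }`. + *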
*/ + ids?: Id[] + /** The documents to apply the rule to. + * Only one of `ids` or `docs` may be specified and at least one must be specified. + * There is a maximum value of 100 documents in a rule. + * You can specify the following attributes for each document: + * + * * `_index`: The index of the document to pin. + * * `_id`: The unique document ID. */ + docs?: QueryDslPinnedDoc[] +} + +export interface QueryRulesQueryRuleCriteria { + /** The type of criteria. The following criteria types are supported: + * + * * `always`: Matches all queries, regardless of input. + * * `contains`: Matches that contain this value anywhere in the field meet the criteria defined by the rule. Only applicable for string values. + * * `exact`: Only exact matches meet the criteria defined by the rule. Applicable for string or numerical values. + * * `fuzzy`: Exact matches or matches within the allowed Levenshtein Edit Distance meet the criteria defined by the rule. Only applicable for string values. + * * `gt`: Matches with a value greater than this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `gte`: Matches with a value greater than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `lt`: Matches with a value less than this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `lte`: Matches with a value less than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `prefix`: Matches that start with this value meet the criteria defined by the rule. Only applicable for string values. + * * `suffix`: Matches that end with this value meet the criteria defined by the rule. Only applicable for string values. */ + type: QueryRulesQueryRuleCriteriaType + /** The metadata field to match against. + * This metadata will be used to match against `match_criteria` sent in the rule. + * It is required for all criteria types except `always`. */ + metadata?: string + /** The values to match against the `metadata` field. + * Only one value must match for the criteria to be met. + * It is required for all criteria types except `always`. */ + values?: any[] +} + +export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always' + +export type QueryRulesQueryRuleType = 'pinned' | 'exclude' + +export interface QueryRulesQueryRuleset { + /** A unique identifier for the ruleset. */ + ruleset_id: Id + /** Rules associated with the query ruleset. */ + rules: QueryRulesQueryRule[] +} + +export interface QueryRulesDeleteRuleRequest extends RequestBase { + /** The unique identifier of the query ruleset containing the rule to delete */ + ruleset_id: Id + /** The unique identifier of the query rule within the specified ruleset to delete */ + rule_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rule_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rule_id?: never } +} + +export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase + +export interface QueryRulesDeleteRulesetRequest extends RequestBase { + /** The unique identifier of the query ruleset to delete */ + ruleset_id: Id + /** All values in `body` will be added to the request body. 
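+ * + * A hedged sketch (the ruleset id is invented; `client` is an assumed Client instance): + * + * await client.queryRules.deleteRuleset({ ruleset_id: 'my-ruleset' }) + *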
*/ + body?: string | { [key: string]: any } & { ruleset_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never } +} + +export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase + +export interface QueryRulesGetRuleRequest extends RequestBase { + /** The unique identifier of the query ruleset containing the rule to retrieve */ + ruleset_id: Id + /** The unique identifier of the query rule within the specified ruleset to retrieve */ + rule_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rule_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rule_id?: never } +} + +export type QueryRulesGetRuleResponse = QueryRulesQueryRule + +export interface QueryRulesGetRulesetRequest extends RequestBase { + /** The unique identifier of the query ruleset */ + ruleset_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never } +} + +export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset + +export interface QueryRulesListRulesetsQueryRulesetListItem { + /** A unique identifier for the ruleset. */ + ruleset_id: Id + /** The number of rules associated with the ruleset. */ + rule_total_count: integer + /** A map of criteria type (for example, `exact`) to the number of rules of that type. + * + * NOTE: The counts in `rule_criteria_types_counts` may be larger than the value of `rule_total_count` because a rule may have multiple criteria. */ + rule_criteria_types_counts: Record + /** A map of rule type (for example, `pinned`) to the number of rules of that type. */ + rule_type_counts: Record +} + +export interface QueryRulesListRulesetsRequest extends RequestBase { + /** The offset from the first result to fetch. */ + from?: integer + /** The maximum number of results to retrieve. */ + size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never } +} + +export interface QueryRulesListRulesetsResponse { + count: long + results: QueryRulesListRulesetsQueryRulesetListItem[] +} + +export interface QueryRulesPutRuleRequest extends RequestBase { + /** The unique identifier of the query ruleset containing the rule to be created or updated. */ + ruleset_id: Id + /** The unique identifier of the query rule within the specified ruleset to be created or updated. */ + rule_id: Id + /** The type of rule. */ + type: QueryRulesQueryRuleType + /** The criteria that must be met for the rule to be applied. + * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ + criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + /** The actions to take when the rule is matched. + * The format of this action depends on the rule type. */ + actions: QueryRulesQueryRuleActions + priority?: integer + /** All values in `body` will be added to the request body. 
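+ * + * A hedged sketch of creating a pinned rule (all ids and values are invented; `client` is an assumed Client instance): + * + * await client.queryRules.putRule({ + * ruleset_id: 'my-ruleset', + * rule_id: 'promote-docs', + * type: 'pinned', + * criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }], + * actions: { ids: ['doc-1', 'doc-2'] } + * }) + *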
*/ + body?: string | { [key: string]: any } & { ruleset_id?: never, rule_id?: never, type?: never, criteria?: never, actions?: never, priority?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rule_id?: never, type?: never, criteria?: never, actions?: never, priority?: never } +} + +export interface QueryRulesPutRuleResponse { + result: Result +} + +export interface QueryRulesPutRulesetRequest extends RequestBase { + /** The unique identifier of the query ruleset to be created or updated. */ + ruleset_id: Id + rules: QueryRulesQueryRule | QueryRulesQueryRule[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, rules?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, rules?: never } +} + +export interface QueryRulesPutRulesetResponse { + result: Result +} + +export interface QueryRulesTestQueryRulesetMatchedRule { + /** Ruleset unique identifier */ + ruleset_id: Id + /** Rule unique identifier within that ruleset */ + rule_id: Id +} + +export interface QueryRulesTestRequest extends RequestBase { + /** The unique identifier of the query ruleset to be created or updated */ + ruleset_id: Id + /** The match criteria to apply to rules in the given query ruleset. + * Match criteria should match the keys defined in the `criteria.metadata` field of the rule. */ + match_criteria: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ruleset_id?: never, match_criteria?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ruleset_id?: never, match_criteria?: never } +} + +export interface QueryRulesTestResponse { + total_matched_rules: integer + matched_rules: QueryRulesTestQueryRulesetMatchedRule[] +} + +export interface RollupDateHistogramGrouping { + /** How long to wait before rolling up new documents. + * By default, the indexer attempts to roll up all data that is available. + * However, it is not uncommon for data to arrive out of order. + * The indexer is unable to deal with data that arrives after a time-span has been rolled up. + * You need to specify a delay that matches the longest period of time you expect out-of-order data to arrive. */ + delay?: Duration + /** The date field that is to be rolled up. */ + field: Field + format?: string + interval?: Duration + /** The interval of time buckets to be generated when rolling up. */ + calendar_interval?: Duration + /** The interval of time buckets to be generated when rolling up. */ + fixed_interval?: Duration + /** Defines what `time_zone` the rollup documents are stored as. + * Unlike raw data, which can shift timezones on the fly, rolled documents have to be stored with a specific timezone. + * By default, rollup documents are stored in `UTC`. */ + time_zone?: TimeZone +} + +export interface RollupFieldMetric { + /** The field to collect metrics for. This must be a numeric of some kind. */ + field: Field + /** An array of metrics to collect for the field. At least one metric must be configured. */ + metrics: RollupMetric[] +} + +export interface RollupGroupings { + /** A date histogram group aggregates a date field into time-based buckets. 
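+ * For example (a hedged illustration with an invented field name), `{ date_histogram: { field: 'timestamp', fixed_interval: '1h', time_zone: 'UTC' } }` buckets documents into hourly intervals.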
+ * This group is mandatory; you currently cannot roll up documents without a timestamp and a `date_histogram` group. */ + date_histogram?: RollupDateHistogramGrouping + /** The histogram group aggregates one or more numeric fields into numeric histogram intervals. */ + histogram?: RollupHistogramGrouping + /** The terms group can be used on keyword or numeric fields to allow bucketing via the terms aggregation at a later point. + * The indexer enumerates and stores all values of a field for each time-period. + * This can be potentially costly for high-cardinality groups such as IP addresses, especially if the time-bucket is particularly sparse. */ + terms?: RollupTermsGrouping +} + +export interface RollupHistogramGrouping { + /** The set of fields that you wish to build histograms for. + * All fields specified must be some kind of numeric. + * Order does not matter. */ + fields: Fields + /** The interval of histogram buckets to be generated when rolling up. + * For example, a value of `5` creates buckets that are five units wide (`0-5`, `5-10`, etc). + * Note that only one interval can be specified in the histogram group, meaning that all fields being grouped via the histogram must share the same interval. */ + interval: long +} + +export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count' + +export interface RollupTermsGrouping { + /** The set of fields that you wish to collect terms for. + * This array can contain fields that are both keyword and numerics. + * Order does not matter. */ + fields: Fields +} + +export interface RollupDeleteJobRequest extends RequestBase { + /** Identifier for the job. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface RollupDeleteJobResponse { + acknowledged: boolean + task_failures?: TaskFailure[] +} + +export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' + +export interface RollupGetJobsRequest extends RequestBase { + /** Identifier for the rollup job. + * If it is `_all` or omitted, the API returns all rollup jobs. */ + id?: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface RollupGetJobsResponse { + jobs: RollupGetJobsRollupJob[] +} + +export interface RollupGetJobsRollupJob { + /** The rollup job configuration. */ + config: RollupGetJobsRollupJobConfiguration + /** Transient statistics about the rollup job, such as how many documents have been processed and how many rollup summary docs have been indexed. + * These stats are not persisted. + * If a node is restarted, these stats are reset. */ + stats: RollupGetJobsRollupJobStats + /** The current status of the indexer for the rollup job. 
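+ * + * A hedged sketch of inspecting it (`client` is an assumed Client instance): + * + * const { jobs } = await client.rollup.getJobs({ id: '_all' }) + * for (const job of jobs) console.log(job.config.id, job.status.job_state) + *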
*/ + status: RollupGetJobsRollupJobStatus +} + +export interface RollupGetJobsRollupJobConfiguration { + cron: string + groups: RollupGroupings + id: Id + index_pattern: string + metrics: RollupFieldMetric[] + page_size: long + rollup_index: IndexName + timeout: Duration +} + +export interface RollupGetJobsRollupJobStats { + documents_processed: long + index_failures: long + index_time_in_ms: DurationValue + index_total: long + pages_processed: long + rollups_indexed: long + search_failures: long + search_time_in_ms: DurationValue + search_total: long + trigger_count: long + processing_time_in_ms: DurationValue + processing_total: long +} + +export interface RollupGetJobsRollupJobStatus { + current_position?: Record + job_state: RollupGetJobsIndexingJobState + upgraded_doc_id?: boolean +} + +export interface RollupGetRollupCapsRequest extends RequestBase { + /** Index, indices or index-pattern to return rollup capabilities for. + * `_all` may be used to fetch rollup capabilities from all jobs. */ + id?: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export type RollupGetRollupCapsResponse = Record + +export interface RollupGetRollupCapsRollupCapabilities { + /** There can be multiple, independent jobs configured for a single index or index pattern. Each of these jobs may have different configurations, so the API returns a list of all the various configurations available. */ + rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[] +} + +export interface RollupGetRollupCapsRollupCapabilitySummary { + fields: Record + index_pattern: string + job_id: string + rollup_index: string +} + +export interface RollupGetRollupCapsRollupFieldSummary { + agg: string + calendar_interval?: Duration + time_zone?: TimeZone +} + +export interface RollupGetRollupIndexCapsIndexCapabilities { + rollup_jobs: RollupGetRollupIndexCapsRollupJobSummary[] +} + +export interface RollupGetRollupIndexCapsRequest extends RequestBase { + /** Data stream or index to check for rollup capabilities. + * Wildcard (`*`) expressions are supported. */ + index: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never } +} + +export type RollupGetRollupIndexCapsResponse = Record + +export interface RollupGetRollupIndexCapsRollupJobSummary { + fields: Record + index_pattern: string + job_id: Id + rollup_index: IndexName +} + +export interface RollupGetRollupIndexCapsRollupJobSummaryField { + agg: string + time_zone?: TimeZone + calendar_interval?: Duration +} + +export interface RollupPutJobRequest extends RequestBase { + /** Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the + * data that is associated with the rollup job. The ID is persistent; it is stored with the rolled + * up data. If you create a job, let it run for a while, then delete the job, the data that the job + * rolled up is still associated with this job ID. You cannot create a new job with the same ID + * since that could lead to problems with mismatched job configurations. */ + id: Id + /** A cron string which defines the intervals when the rollup job should be executed. 
When the interval + * triggers, the indexer attempts to roll up the data in the index pattern. The cron pattern is unrelated + * to the time interval of the data being rolled up. For example, you may wish to create hourly rollups + * of your documents but to only run the indexer on a daily basis at midnight, as defined by the cron. The + * cron pattern is defined just like a Watcher cron schedule. */ + cron: string + /** Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be + * available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of + * the groups configuration as defining a set of tools that can later be used in aggregations to partition the + * data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide + * enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. */ + groups: RollupGroupings + /** The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to + * roll up the entire index or index-pattern. */ + index_pattern: string + /** Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each + * group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined + * on a per-field basis and for each field you configure which metric should be collected. */ + metrics?: RollupFieldMetric[] + /** The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends + * to execute faster, but requires more memory during processing. This value has no effect on how the data is + * rolled up; it is merely used for tweaking the speed or memory cost of the indexer. */ + page_size: integer + /** The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. */ + rollup_index: IndexName + /** Time to wait for the request to complete. */ + timeout?: Duration + headers?: HttpHeaders + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, cron?: never, groups?: never, index_pattern?: never, metrics?: never, page_size?: never, rollup_index?: never, timeout?: never, headers?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, cron?: never, groups?: never, index_pattern?: never, metrics?: never, page_size?: never, rollup_index?: never, timeout?: never, headers?: never } +} + +export type RollupPutJobResponse = AcknowledgedResponseBase + +export interface RollupRollupSearchRequest extends RequestBase { + /** A comma-separated list of data streams and indices used to limit the request. + * This parameter has the following rules: + * + * * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. + * * Multiple non-rollup indices may be specified. + * * Only one rollup index may be specified. If more than one are supplied, an exception occurs. + * * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. 
However, you can use an expression to match multiple non-rollup indices or data streams. */ + index: Indices + /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ + rest_total_hits_as_int?: boolean + /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ + typed_keys?: boolean + /** Specifies aggregations. */ + aggregations?: Record + /** Specifies aggregations. + * @alias aggregations */ + aggs?: Record + /** Specifies a DSL query that is subject to some limitations. */ + query?: QueryDslQueryContainer + /** Must be zero if set, as rollups work on pre-aggregated data. */ + size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, rest_total_hits_as_int?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, rest_total_hits_as_int?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, size?: never } +} + +export interface RollupRollupSearchResponse> { + took: long + timed_out: boolean + terminated_early?: boolean + _shards: ShardStatistics + hits: SearchHitsMetadata + aggregations?: TAggregations +} + +export interface RollupStartJobRequest extends RequestBase { + /** Identifier for the rollup job. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface RollupStartJobResponse { + started: boolean +} + +export interface RollupStopJobRequest extends RequestBase { + /** Identifier for the rollup job. */ + id: Id + /** If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. + * If more than `timeout` time has passed, the API throws a timeout exception. + * NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. + * The timeout simply means the API call itself timed out while waiting for the status change. */ + timeout?: Duration + /** If set to `true`, causes the API to block until the indexer state completely stops. + * If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. */ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, timeout?: never, wait_for_completion?: never } +} + +export interface RollupStopJobResponse { + stopped: boolean +} + +export interface SearchApplicationAnalyticsCollection { + /** Data stream for the collection. 
*/ + event_data_stream: SearchApplicationEventDataStream +} + +export interface SearchApplicationEventDataStream { + name: IndexName +} + +export type SearchApplicationEventType = 'page_view' | 'search' | 'search_click' + +export interface SearchApplicationSearchApplication extends SearchApplicationSearchApplicationParameters { + /** Search Application name */ + name: Name + /** Last time the Search Application was updated. */ + updated_at_millis: EpochTime +} + +export interface SearchApplicationSearchApplicationParameters { + /** Indices that are part of the Search Application. */ + indices: IndexName[] + /** Analytics collection associated with the Search Application. */ + analytics_collection_name?: Name + /** Search template to use on search operations. */ + template?: SearchApplicationSearchApplicationTemplate +} + +export interface SearchApplicationSearchApplicationTemplate { + /** The associated mustache template. */ + script: Script | ScriptSource +} + +export interface SearchApplicationDeleteRequest extends RequestBase { + /** The name of the search application to delete. */ + name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } +} + +export type SearchApplicationDeleteResponse = AcknowledgedResponseBase + +export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends RequestBase { + /** The name of the analytics collection to be deleted */ + name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } +} + +export type SearchApplicationDeleteBehavioralAnalyticsResponse = AcknowledgedResponseBase + +export interface SearchApplicationGetRequest extends RequestBase { + /** The name of the search application */ + name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } +} + +export type SearchApplicationGetResponse = SearchApplicationSearchApplication + +export interface SearchApplicationGetBehavioralAnalyticsRequest extends RequestBase { + /** A list of analytics collections to limit the returned information */ + name?: Name[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } +} + +export type SearchApplicationGetBehavioralAnalyticsResponse = Record + +export interface SearchApplicationListRequest extends RequestBase { + /** Query in the Lucene query string syntax. */ + q?: string + /** Starting offset. */ + from?: integer + /** Specifies a maximum number of results to get. */ + size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { q?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. 
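+ * + * A hedged sketch (the query and size values are illustrative; `client` is an assumed Client instance): + * + * const { count, results } = await client.searchApplication.list({ q: 'name:my-app', size: 10 }) + *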
*/ + querystring?: { [key: string]: any } & { q?: never, from?: never, size?: never } +} + +export interface SearchApplicationListResponse { + count: long + results: SearchApplicationSearchApplication[] +} + +export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase { + /** The name of the behavioral analytics collection. */ + collection_name: Name + /** The analytics event type. */ + event_type: SearchApplicationEventType + /** Whether the response must include more details */ + debug?: boolean + payload?: any + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { collection_name?: never, event_type?: never, debug?: never, payload?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { collection_name?: never, event_type?: never, debug?: never, payload?: never } +} + +export interface SearchApplicationPostBehavioralAnalyticsEventResponse { + accepted: boolean + event?: any +} + +export interface SearchApplicationPutRequest extends RequestBase { + /** The name of the search application to be created or updated. */ + name: Name + /** If `true`, this request cannot replace or update existing Search Applications. */ + create?: boolean + search_application?: SearchApplicationSearchApplicationParameters + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, create?: never, search_application?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, create?: never, search_application?: never } +} + +export interface SearchApplicationPutResponse { + result: Result +} + +export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase extends AcknowledgedResponseBase { + /** The name of the analytics collection created or updated */ + name: Name +} + +export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase { + /** The name of the analytics collection to be created or updated. */ + name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } +} + +export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase + +export interface SearchApplicationRenderQueryRequest extends RequestBase { + /** The name of the search application to render the query for. */ + name: Name + params?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, params?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, params?: never } +} + +export interface SearchApplicationRenderQueryResponse { +} + +export interface SearchApplicationSearchRequest extends RequestBase { + /** The name of the search application to be searched. */ + name: Name + /** Determines whether aggregation names are prefixed by their respective types in the response. */ + typed_keys?: boolean + /** Query parameters specific to this request, which will override any defaults specified in the template. 
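+ * + * A hedged sketch (the application name and template parameter are invented; `client` is an assumed Client instance): + * + * const resp = await client.searchApplication.search({ name: 'my-app', params: { query_string: 'shoes' } }) + *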
*/ + params?: Record + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, typed_keys?: never, params?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, typed_keys?: never, params?: never } +} + +export type SearchApplicationSearchResponse> = SearchResponseBody + +export type SearchableSnapshotsStatsLevel = 'cluster' | 'indices' | 'shards' + +export interface SearchableSnapshotsCacheStatsNode { + shared_cache: SearchableSnapshotsCacheStatsShared +} + +export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { + /** The names of the nodes in the cluster to target. */ + node_id?: NodeIds + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never } +} + +export interface SearchableSnapshotsCacheStatsResponse { + nodes: Record +} + +export interface SearchableSnapshotsCacheStatsShared { + reads: long + bytes_read_in_bytes: ByteSize + writes: long + bytes_written_in_bytes: ByteSize + evictions: long + num_regions: integer + size_in_bytes: ByteSize + region_size_in_bytes: ByteSize +} + +export interface SearchableSnapshotsClearCacheRequest extends RequestBase { + /** A comma-separated list of data streams, indices, and aliases to clear from the cache. + * It supports wildcards (`*`). */ + index?: Indices + /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ + expand_wildcards?: ExpandWildcards + /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ + allow_no_indices?: boolean + /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ + ignore_unavailable?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_unavailable?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, expand_wildcards?: never, allow_no_indices?: never, ignore_unavailable?: never } +} + +export type SearchableSnapshotsClearCacheResponse = any + +export interface SearchableSnapshotsMountMountedSnapshot { + snapshot: Name + indices: Indices + shards: ShardStatistics +} + +export interface SearchableSnapshotsMountRequest extends RequestBase { + /** The name of the repository containing the snapshot of the index to mount. */ + repository: Name + /** The name of the snapshot of the index to mount. */ + snapshot: Name + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** If true, the request blocks until the operation is complete. */ + wait_for_completion?: boolean + /** The mount option for the searchable snapshot index. */ + storage?: string + /** The name of the index contained in the snapshot whose data is to be mounted. 
+ * If no `renamed_index` is specified, this name will also be used to create the new index. */ + index: IndexName + /** The name of the index that will be created. */ + renamed_index?: IndexName + /** The settings that should be added to the index when it is mounted. */ + index_settings?: Record + /** The names of settings that should be removed from the index when it is mounted. */ + ignore_index_settings?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, storage?: never, index?: never, renamed_index?: never, index_settings?: never, ignore_index_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, storage?: never, index?: never, renamed_index?: never, index_settings?: never, ignore_index_settings?: never } +} + +export interface SearchableSnapshotsMountResponse { + snapshot: SearchableSnapshotsMountMountedSnapshot +} + +export interface SearchableSnapshotsStatsRequest extends RequestBase { + /** A comma-separated list of data streams and indices to retrieve statistics for. */ + index?: Indices + /** Return stats aggregated at cluster, index or shard level */ + level?: SearchableSnapshotsStatsLevel + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, level?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, level?: never } +} + +export interface SearchableSnapshotsStatsResponse { + stats: any + total: any +} + +export interface SecurityAccess { + /** A list of indices permission entries for cross-cluster replication. */ + replication?: SecurityReplicationAccess[] + /** A list of indices permission entries for cross-cluster search. */ + search?: SecuritySearchAccess[] +} + +export interface SecurityApiKey { + /** Id for the API key */ + id: Id + /** Name of the API key. */ + name: Name + /** The type of the API key (e.g. `rest` or `cross_cluster`). */ + type: SecurityApiKeyType + /** Creation time for the API key in milliseconds. */ + creation: EpochTime + /** Expiration time for the API key in milliseconds. */ + expiration?: EpochTime + /** Invalidation status for the API key. + * If the key has been invalidated, it has a value of `true`. Otherwise, it is `false`. */ + invalidated: boolean + /** If the key has been invalidated, invalidation time in milliseconds. */ + invalidation?: EpochTime + /** Principal for which this API key was created */ + username: Username + /** Realm name of the principal for which this API key was created. */ + realm: string + /** Realm type of the principal for which this API key was created */ + realm_type?: string + /** Metadata of the API key */ + metadata: Metadata + /** The role descriptors assigned to this API key when it was created or last updated. + * An empty role descriptor means the API key inherits the owner user’s permissions. */ + role_descriptors?: Record + /** The owner user’s permissions associated with the API key. + * It is a point-in-time snapshot captured at creation and subsequent updates. + * An API key’s effective permissions are an intersection of its assigned privileges and the owner user’s permissions. 
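+ * For example (hedged): if a key's role descriptors grant `read` on `logs-*` but the owner can only read `logs-app`, the key can effectively read only `logs-app`. + *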
*/ + limited_by?: Record[] + /** The access granted to cross-cluster API keys. + * The access is composed of permissions for cross cluster search and cross cluster replication. + * At least one of them must be specified. + * When specified, the new access assignment fully replaces the previously assigned access. */ + access?: SecurityAccess + /** The certificate identity associated with a cross-cluster API key. + * Restricts the API key to connections authenticated by a specific TLS certificate. + * Only applicable to cross-cluster API keys. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + certificate_identity?: string + /** The profile uid for the API key owner principal, if requested and if it exists */ + profile_uid?: string + /** Sorting values when using the `sort` parameter with the `security.query_api_keys` API. */ + _sort?: SortResults +} + +export type SecurityApiKeyManagedBy = 'cloud' | 'elasticsearch' + +export type SecurityApiKeyType = 'rest' | 'cross_cluster' + +export interface SecurityApplicationGlobalUserPrivileges { + manage: SecurityManageUserPrivileges +} + +export interface SecurityApplicationPrivileges { + /** The name of the application to which this entry applies. */ + application: string + /** A list of strings, where each element is the name of an application privilege or action. */ + privileges: string[] + /** A list of resources to which the privileges are applied. */ + resources: string[] +} + +export interface SecurityBulkError { + /** The number of errors */ + count: integer + /** Details about the errors, keyed by role name */ + details: Record +} + +export interface SecurityClusterNode { + name: Name +} + +export type SecurityClusterPrivilege = 'all' | 'cancel_task' | 'create_snapshot' | 'cross_cluster_replication' | 'cross_cluster_search' | 'delegate_pki' | 'grant_api_key' | 'manage' | 'manage_api_key' | 'manage_autoscaling' | 'manage_behavioral_analytics' | 'manage_ccr' | 'manage_data_frame_transforms' | 'manage_data_stream_global_retention' | 'manage_enrich' | 'manage_esql' | 'manage_ilm' | 'manage_index_templates' | 'manage_inference' | 'manage_ingest_pipelines' | 'manage_logstash_pipelines' | 'manage_ml' | 'manage_oidc' | 'manage_own_api_key' | 'manage_pipeline' | 'manage_rollup' | 'manage_saml' | 'manage_search_application' | 'manage_search_query_rules' | 'manage_search_synonyms' | 'manage_security' | 'manage_service_account' | 'manage_slm' | 'manage_token' | 'manage_transform' | 'manage_user_profile' | 'manage_watcher' | 'monitor' | 'monitor_data_frame_transforms' | 'monitor_data_stream_global_retention' | 'monitor_enrich' | 'monitor_esql' | 'monitor_inference' | 'monitor_ml' | 'monitor_rollup' | 'monitor_snapshot' | 'monitor_stats' | 'monitor_text_structure' | 'monitor_transform' | 'monitor_watcher' | 'none' | 'post_behavioral_analytics_event' | 'read_ccr' | 'read_fleet_secrets' | 'read_ilm' | 'read_pipeline' | 'read_security' | 'read_slm' | 'transport_client' | 'write_connector_secrets' | 'write_fleet_secrets' | string + +export interface SecurityCreatedStatus { + created: boolean +} + +export interface SecurityFieldSecurity { + except?: Fields + grant?: Fields +} + +export interface SecurityGlobalPrivilege { + application: SecurityApplicationGlobalUserPrivileges +} + +export type SecurityGrantType = 'password' | 'access_token' + +export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 
'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string + +export interface SecurityIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ + field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ + privileges: SecurityIndexPrivilege[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ + query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + allow_restricted_indices?: boolean +} + +export type SecurityIndicesPrivilegesQuery = string | QueryDslQueryContainer | SecurityRoleTemplateQuery + +export interface SecurityManageUserPrivileges { + applications: string[] +} + +export interface SecurityNodeSecurityStats { + /** Role statistics. */ + roles: SecurityRolesStats +} + +export interface SecurityRealmInfo { + name: Name + type: string +} + +export type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats' + +export interface SecurityRemoteClusterPrivileges { + /** A list of cluster aliases to which the permissions in this entry apply. */ + clusters: Names + /** The cluster level privileges that owners of the role have on the remote cluster. */ + privileges: SecurityRemoteClusterPrivilege[] +} + +export interface SecurityRemoteIndicesPrivileges { + /** A list of cluster aliases to which the permissions in this entry apply. */ + clusters: Names + /** The document fields that the owners of the role have read access to. */ + field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ + privileges: SecurityIndexPrivilege[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ + query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + allow_restricted_indices?: boolean +} + +export interface SecurityRemoteUserIndicesPrivileges { + /** The document fields that the owners of the role have read access to. 
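+ * For example (a hedged illustration), `{ grant: ['*'], except: ['private*'] }` would allow reading every field except those whose names start with `private`. + *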
*/ + field_security?: SecurityFieldSecurity[] + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ + privileges: SecurityIndexPrivilege[] + /** Search queries that define the documents the user has access to. A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ + query?: SecurityIndicesPrivilegesQuery[] + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ + allow_restricted_indices: boolean + clusters: string[] +} + +export interface SecurityReplicationAccess { + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: IndexName | IndexName[] + /** This needs to be set to true if the patterns in the names field should cover system indices. */ + allow_restricted_indices?: boolean +} + +export interface SecurityRestriction { + /** A list of workflows to which the API key is restricted. + * NOTE: In order to use a role restriction, an API key must be created with a single role descriptor. */ + workflows: SecurityRestrictionWorkflow[] +} + +export type SecurityRestrictionWorkflow = 'search_application_query' | string + +export interface SecurityRoleDescriptor { + /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ + cluster?: SecurityClusterPrivilege[] + /** A list of indices permissions entries. */ + indices?: SecurityIndicesPrivileges[] + /** A list of indices permissions entries. + * @alias indices */ + index?: SecurityIndicesPrivileges[] + /** A list of indices permissions for remote clusters. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + remote_indices?: SecurityRemoteIndicesPrivileges[] + /** A list of cluster permissions for remote clusters. + * NOTE: This is limited to a subset of the cluster permissions. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + remote_cluster?: SecurityRemoteClusterPrivileges[] + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege + /** A list of application privilege entries */ + applications?: SecurityApplicationPrivileges[] + /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ + metadata?: Metadata + /** A list of users that the API keys can impersonate. + * NOTE: In Elastic Cloud Serverless, the run-as feature is disabled. + * For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */ + run_as?: string[] + /** Optional description of the role descriptor */ + description?: string + /** Restriction for when the role descriptor is allowed to be effective.
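+ * + * Illustrative sketch (editor's addition, not generated output): a role descriptor granting read-only access to one index pattern and restricted to a workflow, typed against the interfaces above; names and values are placeholders: + * + *   const readOnly: SecurityRoleDescriptor = { + *     cluster: ['monitor'], + *     indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }], + *     restriction: { workflows: ['search_application_query'] } + *   }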
*/ + restriction?: SecurityRestriction + transient_metadata?: Record<string, any> +} + +export interface SecurityRoleDescriptorRead { + /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ + cluster: SecurityClusterPrivilege[] + /** A list of indices permissions entries. */ + indices: SecurityIndicesPrivileges[] + /** A list of indices permissions entries. + * @alias indices */ + index: SecurityIndicesPrivileges[] + /** A list of indices permissions for remote clusters. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + remote_indices?: SecurityRemoteIndicesPrivileges[] + /** A list of cluster permissions for remote clusters. + * NOTE: This is limited to a subset of the cluster permissions. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + remote_cluster?: SecurityRemoteClusterPrivileges[] + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege + /** A list of application privilege entries */ + applications?: SecurityApplicationPrivileges[] + /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ + metadata?: Metadata + /** A list of users that the API keys can impersonate. */ + run_as?: string[] + /** An optional description of the role descriptor. */ + description?: string + /** A restriction for when the role descriptor is allowed to be effective. */ + restriction?: SecurityRestriction + transient_metadata?: Record<string, any> +} + +export interface SecurityRoleMapping { + enabled: boolean + metadata: Metadata + roles?: string[] + role_templates?: SecurityRoleTemplate[] + rules: SecurityRoleMappingRule +} + +export interface SecurityRoleMappingRule { + any?: SecurityRoleMappingRule[] + all?: SecurityRoleMappingRule[] + field?: Partial<Record<Field, FieldValue | FieldValue[]>> + except?: SecurityRoleMappingRule +} + +export interface SecurityRoleTemplate { + format?: SecurityTemplateFormat + template: Script | ScriptSource +} + +export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer + +export interface SecurityRoleTemplateQuery { + /** When you create a role, you can specify a query that defines the document level security permissions. You can optionally + * use Mustache templates in the role query to insert the username of the current authenticated user into the role. + * Like other places in Elasticsearch that support templating or scripting, you can specify inline, stored, or file-based + * templates and define custom parameters. You access the details for the current authenticated user through the _user parameter. */ + template?: SecurityRoleTemplateScript | SecurityRoleTemplateInlineQuery +} + +export interface SecurityRoleTemplateScript { + source?: SecurityRoleTemplateInlineQuery + /** The `id` for a stored script. */ + id?: Id + /** Specifies any named parameters that are passed into the script as variables. + * Use parameters instead of hard-coded values to decrease compile time. */ + params?: Record<string, any> + /** Specifies the language the script is written in. */ + lang?: ScriptLanguage + options?: Record<string, string> +} + +export interface SecurityRolesStats { + /** Document-level security (DLS) statistics.
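+ * + * For context (editor's sketch, not generated output): a templated DLS role query of the shape modelled by SecurityRoleTemplateQuery above, using Mustache to insert the current user's name; the field name `owner` is a placeholder: + * + *   const dlsQuery: SecurityRoleTemplateQuery = { + *     template: { source: JSON.stringify({ term: { owner: '{{_user.username}}' } }) } + *   }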
*/ + dls: XpackUsageSecurityRolesDls +} + +export interface SecuritySearchAccess { + /** The document fields that the owners of the role have read access to. */ + field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: IndexName | IndexName[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ + query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + allow_restricted_indices?: boolean +} + +export interface SecuritySecuritySettings { + index?: IndicesIndexSettings +} + +export type SecurityTemplateFormat = 'string' | 'json' + +export interface SecurityUser { + email?: string | null + full_name?: Name | null + metadata: Metadata + roles: string[] + username: Username + enabled: boolean + profile_uid?: SecurityUserProfileId +} + +export interface SecurityUserIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ + field_security?: SecurityFieldSecurity[] + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ + privileges: SecurityIndexPrivilege[] + /** Search queries that define the documents the user has access to. A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ + query?: SecurityIndicesPrivilegesQuery[] + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ + allow_restricted_indices: boolean +} + +export interface SecurityUserProfile { + uid: SecurityUserProfileId + user: SecurityUserProfileUser + data: Record<string, any> + labels: Record<string, any> + enabled?: boolean +} + +export interface SecurityUserProfileHitMetadata { + _primary_term: long + _seq_no: SequenceNumber +} + +export type SecurityUserProfileId = string + +export interface SecurityUserProfileUser { + email?: string | null + full_name?: Name | null + realm_name: Name + realm_domain?: Name + roles: string[] + username: Username +} + +export interface SecurityUserProfileWithMetadata extends SecurityUserProfile { + last_synchronized: long + _doc: SecurityUserProfileHitMetadata +} + +export interface SecurityActivateUserProfileRequest extends RequestBase { + /** The user's Elasticsearch access token or JWT. + * Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. + * If you specify the `access_token` grant type, this parameter is required. + * It is not valid with other grant types.
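+ * + * Usage sketch (editor's addition; assumes a configured @elastic/elasticsearch Client as `client` and a placeholder token, inside an async context): + * + *   const profile = await client.security.activateUserProfile({ + *     grant_type: 'access_token', + *     access_token: token + *   }) + *   console.log(profile.uid)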
*/ + access_token?: string + /** The type of grant. */ + grant_type: SecurityGrantType + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ + password?: string + /** The username that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ + username?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { access_token?: never, grant_type?: never, password?: never, username?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { access_token?: never, grant_type?: never, password?: never, username?: never } +} + +export type SecurityActivateUserProfileResponse = SecurityUserProfileWithMetadata + +export interface SecurityAuthenticateAuthenticateApiKey { + id: Id + name?: Name + managed_by: SecurityApiKeyManagedBy + internal?: boolean +} + +export interface SecurityAuthenticateRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface SecurityAuthenticateResponse { + api_key?: SecurityAuthenticateAuthenticateApiKey + authentication_realm: SecurityRealmInfo + email?: string | null + full_name?: Name | null + lookup_realm: SecurityRealmInfo + metadata: Metadata + roles: string[] + username: Username + enabled: boolean + authentication_type: string + token?: SecurityAuthenticateToken +} + +export interface SecurityAuthenticateToken { + name: Name + type?: string +} + +export interface SecurityBulkDeleteRoleRequest extends RequestBase { + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** An array of role names to delete */ + names: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, names?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, names?: never } +} + +export interface SecurityBulkDeleteRoleResponse { + /** Array of deleted roles */ + deleted?: string[] + /** Array of roles that could not be found */ + not_found?: string[] + /** Present if any deletes resulted in errors */ + errors?: SecurityBulkError +} + +export interface SecurityBulkPutRoleRequest extends RequestBase { + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** A dictionary of role name to RoleDescriptor objects to add or update */ + roles: Record<string, SecurityRoleDescriptor> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, roles?: never } + /** All values in `querystring` will be added to the request querystring.
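+ * + * Usage sketch (editor's addition; assumes a configured Client as `client`; the role name and index pattern are placeholders): + * + *   const res = await client.security.bulkPutRole({ + *     roles: { 'logs-reader': { indices: [{ names: ['logs-*'], privileges: ['read'] }] } } + *   }) + *   console.log(res.created, res.errors)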
*/ + querystring?: { [key: string]: any } & { refresh?: never, roles?: never } +} + +export interface SecurityBulkPutRoleResponse { + /** Array of created roles */ + created?: string[] + /** Array of updated roles */ + updated?: string[] + /** Array of role names without any changes */ + noop?: string[] + /** Present if any updates resulted in errors */ + errors?: SecurityBulkError +} + +export interface SecurityBulkUpdateApiKeysRequest extends RequestBase { + /** Expiration time for the API keys. + * By default, API keys never expire. + * This property can be omitted to leave the value unchanged. */ + expiration?: Duration + /** The API key identifiers. */ + ids: string | string[] + /** Arbitrary nested metadata to associate with the API keys. + * Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. + * Any information specified with this parameter fully replaces metadata previously associated with the API key. */ + metadata?: Metadata + /** The role descriptors to assign to the API keys. + * An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. + * You can assign new privileges by specifying them in this parameter. + * To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. + * If an API key has no assigned privileges, it inherits the owner user's full permissions. + * The snapshot of the owner's permissions is always updated, whether or not you supply the `role_descriptors` parameter. + * The structure of a role descriptor is the same as the request for the create API keys API. */ + role_descriptors?: Record<string, SecurityRoleDescriptor> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { expiration?: never, ids?: never, metadata?: never, role_descriptors?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { expiration?: never, ids?: never, metadata?: never, role_descriptors?: never } +} + +export interface SecurityBulkUpdateApiKeysResponse { + errors?: SecurityBulkError + noops: string[] + updated: string[] +} + +export interface SecurityChangePasswordRequest extends RequestBase { + /** The user whose password you want to change. If you do not specify this + * parameter, the password is changed for the current user. */ + username?: Username + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** The new password value. Passwords must be at least 6 characters long. */ + password?: Password + /** A hash of the new password value. This must be produced using the same + * hashing algorithm as has been configured for password storage. For more details, + * see the explanation of the `xpack.security.authc.password_hashing.algorithm` + * setting. */ + password_hash?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never, password?: never, password_hash?: never } + /** All values in `querystring` will be added to the request querystring.
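+ * + * Usage sketch (editor's addition; assumes a configured Client as `client`; the username and password are placeholders): + * + *   await client.security.changePassword({ username: 'jacknich', password: 'new-password-here' })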
*/ + querystring?: { [key: string]: any } & { username?: never, refresh?: never, password?: never, password_hash?: never } +} + +export interface SecurityChangePasswordResponse { +} + +export interface SecurityClearApiKeyCacheRequest extends RequestBase { + /** Comma-separated list of API key IDs to evict from the API key cache. + * To evict all API keys, use `*`. + * Does not support other wildcard patterns. */ + ids: Ids + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ids?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ids?: never } +} + +export interface SecurityClearApiKeyCacheResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record<string, SecurityClusterNode> +} + +export interface SecurityClearCachedPrivilegesRequest extends RequestBase { + /** A comma-separated list of applications. + * To clear all applications, use an asterisk (`*`). + * It does not support other wildcard patterns. */ + application: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { application?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { application?: never } +} + +export interface SecurityClearCachedPrivilegesResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record<string, SecurityClusterNode> +} + +export interface SecurityClearCachedRealmsRequest extends RequestBase { + /** A comma-separated list of realms. + * To clear all realms, use an asterisk (`*`). + * It does not support other wildcard patterns. */ + realms: Names + /** A comma-separated list of the users to clear from the cache. + * If you do not specify this parameter, the API evicts all users from the user cache. */ + usernames?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { realms?: never, usernames?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { realms?: never, usernames?: never } +} + +export interface SecurityClearCachedRealmsResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record<string, SecurityClusterNode> +} + +export interface SecurityClearCachedRolesRequest extends RequestBase { + /** A comma-separated list of roles to evict from the role cache. + * To evict all roles, use an asterisk (`*`). + * It does not support other wildcard patterns. */ + name: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } +} + +export interface SecurityClearCachedRolesResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record<string, SecurityClusterNode> +} + +export interface SecurityClearCachedServiceTokensRequest extends RequestBase { + /** The namespace, which is a top-level grouping of service accounts. */ + namespace: Namespace + /** The name of the service, which must be unique within its namespace. */ + service: Service + /** A comma-separated list of token names to evict from the service account token caches. + * Use a wildcard (`*`) to evict all tokens that belong to a service account. + * It does not support other wildcard patterns. */ + name: Names + /** All values in `body` will be added to the request body.
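+ * + * Usage sketch (editor's addition; assumes a configured Client as `client`; the namespace and service names are placeholders): evict all cached tokens for one service account: + * + *   await client.security.clearCachedServiceTokens({ + *     namespace: 'elastic', service: 'fleet-server', name: '*' + *   })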
*/ + body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { namespace?: never, service?: never, name?: never } +} + +export interface SecurityClearCachedServiceTokensResponse { + _nodes: NodeStatistics + cluster_name: Name + nodes: Record<string, SecurityClusterNode> +} + +export interface SecurityCreateApiKeyRequest extends RequestBase { + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** The expiration time for the API key. + * By default, API keys never expire. */ + expiration?: Duration + /** A name for the API key. */ + name?: Name + /** An array of role descriptors for this API key. + * When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. + * If you supply role descriptors, the resultant permissions are an intersection of the API key's permissions and the authenticated user's permissions, thereby limiting the access scope for API keys. + * The structure of a role descriptor is the same as the request for the create role API. + * For more details, refer to the create or update roles API. + * + * NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. + * In this case, you must explicitly specify a role descriptor with no privileges. + * The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. */ + role_descriptors?: Record<string, SecurityRoleDescriptor> + /** Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. */ + metadata?: Metadata + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, expiration?: never, name?: never, role_descriptors?: never, metadata?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, expiration?: never, name?: never, role_descriptors?: never, metadata?: never } +} + +export interface SecurityCreateApiKeyResponse { + /** Generated API key. */ + api_key: string + /** Expiration in milliseconds for the API key. */ + expiration?: long + /** Unique ID for this API key. */ + id: Id + /** Specifies the name for this API key. */ + name: Name + /** API key credentials which is the base64-encoding of + * the UTF-8 representation of `id` and `api_key` joined + * by a colon (`:`). */ + encoded: string +} + +export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { + /** The access to be granted to this API key. + * The access is composed of permissions for cross-cluster search and cross-cluster replication. + * At least one of them must be specified. + * + * NOTE: No explicit privileges should be specified for either search or replication access. + * The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. */ + access: SecurityAccess + /** Expiration time for the API key.
+ * By default, API keys never expire. */ + expiration?: Duration + /** Arbitrary metadata that you want to associate with the API key. + * It supports a nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. */ + metadata?: Metadata + /** Specifies the name for this API key. */ + name: Name + /** The certificate identity to associate with this API key. + * This field is used to restrict the API key to connections authenticated by a specific TLS certificate. + * The value should match the certificate's distinguished name (DN) pattern. */ + certificate_identity?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never, certificate_identity?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { access?: never, expiration?: never, metadata?: never, name?: never, certificate_identity?: never } +} + +export interface SecurityCreateCrossClusterApiKeyResponse { + /** Generated API key. */ + api_key: string + /** Expiration in milliseconds for the API key. */ + expiration?: DurationValue<UnitMillis> + /** Unique ID for this API key. */ + id: Id + /** Specifies the name for this API key. */ + name: Name + /** API key credentials which is the base64-encoding of + * the UTF-8 representation of `id` and `api_key` joined + * by a colon (`:`). */ + encoded: string +} + +export interface SecurityCreateServiceTokenRequest extends RequestBase { + /** The name of the namespace, which is a top-level grouping of service accounts. */ + namespace: Namespace + /** The name of the service. */ + service: Service + /** The name for the service account token. + * If omitted, a random name will be generated. + * + * Token names must be at least one and no more than 256 characters. + * They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. + * + * NOTE: Token names must be unique in the context of the associated service account. + * They must also be globally unique with their fully qualified names, which are composed of the service account principal and token name, such as `<namespace>/<service>/<token-name>`. */ + name?: Name + /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring.
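+ * + * Usage sketch (editor's addition; assumes a configured Client as `client`; all names are placeholders): + * + *   const res = await client.security.createServiceToken({ + *     namespace: 'elastic', service: 'fleet-server', name: 'my-token' + *   }) + *   console.log(res.token.value)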
*/ + querystring?: { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } +} + +export interface SecurityCreateServiceTokenResponse { + created: boolean + token: SecurityCreateServiceTokenToken +} + +export interface SecurityCreateServiceTokenToken { + name: Name + value: string +} + +export interface SecurityDelegatePkiAuthentication { + username: string + roles: string[] + full_name: string | null + email: string | null + token?: Record<string, string> + metadata: Metadata + enabled: boolean + authentication_realm: SecurityDelegatePkiAuthenticationRealm + lookup_realm: SecurityDelegatePkiAuthenticationRealm + authentication_type: string + api_key?: Record<string, string> +} + +export interface SecurityDelegatePkiAuthenticationRealm { + name: string + type: string + domain?: string +} + +export interface SecurityDelegatePkiRequest extends RequestBase { + /** The X509Certificate chain, which is represented as an ordered string array. + * Each string in the array is the base64 encoding (Section 4 of RFC 4648, not base64url) of the certificate's DER encoding. + * + * The first element is the target certificate that contains the subject distinguished name that is requesting access. + * This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. */ + x509_certificate_chain: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { x509_certificate_chain?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { x509_certificate_chain?: never } +} + +export interface SecurityDelegatePkiResponse { + /** An access token associated with the subject distinguished name of the client's certificate. */ + access_token: string + /** The amount of time (in seconds) before the token expires. */ + expires_in: long + /** The type of token. */ + type: string + authentication?: SecurityDelegatePkiAuthentication +} + +export interface SecurityDeletePrivilegesFoundStatus { + found: boolean +} + +export interface SecurityDeletePrivilegesRequest extends RequestBase { + /** The name of the application. + * Application privileges are always associated with exactly one application. */ + application: Name + /** The name of the privilege. */ + name: Names + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { application?: never, name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { application?: never, name?: never, refresh?: never } +} + +export type SecurityDeletePrivilegesResponse = Record<string, Record<string, SecurityDeletePrivilegesFoundStatus>> + +export interface SecurityDeleteRoleRequest extends RequestBase { + /** The name of the role. */ + name: Name + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body.
*/ + body?: string | { [key: string]: any } & { name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, refresh?: never } +} + +export interface SecurityDeleteRoleResponse { + /** If the role is successfully deleted, `found` is `true`. + * Otherwise, `found` is `false`. */ + found: boolean +} + +export interface SecurityDeleteRoleMappingRequest extends RequestBase { + /** The distinct name that identifies the role mapping. + * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ + name: Name + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, refresh?: never } +} + +export interface SecurityDeleteRoleMappingResponse { + /** If the mapping is successfully deleted, `found` is `true`. + * Otherwise, `found` is `false`. */ + found: boolean +} + +export interface SecurityDeleteServiceTokenRequest extends RequestBase { + /** The namespace, which is a top-level grouping of service accounts. */ + namespace: Namespace + /** The service name. */ + service: Service + /** The name of the service account token. */ + name: Name + /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { namespace?: never, service?: never, name?: never, refresh?: never } +} + +export interface SecurityDeleteServiceTokenResponse { + /** If the service account token is successfully deleted, the request returns `{"found": true}`. + * Otherwise, the response will have status code 404 and `found` is set to `false`. */ + found: boolean +} + +export interface SecurityDeleteUserRequest extends RequestBase { + /** An identifier for the user. */ + username: Username + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never } +} + +export interface SecurityDeleteUserResponse { + /** If the user is successfully deleted, the request returns `{"found": true}`. + * Otherwise, `found` is set to `false`. 
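+ * + * Usage sketch (editor's addition; assumes a configured Client as `client`; the username is a placeholder): + * + *   const res = await client.security.deleteUser({ username: 'jacknich' }) + *   if (!res.found) console.log('user did not exist')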
*/ + found: boolean +} + +export interface SecurityDisableUserRequest extends RequestBase { + /** An identifier for the user. */ + username: Username + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never } +} + +export interface SecurityDisableUserResponse { +} + +export interface SecurityDisableUserProfileRequest extends RequestBase { + /** Unique identifier for the user profile. */ + uid: SecurityUserProfileId + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', it does nothing with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uid?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uid?: never, refresh?: never } +} + +export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase + +export interface SecurityEnableUserRequest extends RequestBase { + /** An identifier for the user. */ + username: Username + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never } +} + +export interface SecurityEnableUserResponse { +} + +export interface SecurityEnableUserProfileRequest extends RequestBase { + /** A unique identifier for the user profile. */ + uid: SecurityUserProfileId + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ + refresh?: Refresh + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uid?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uid?: never, refresh?: never } +} + +export type SecurityEnableUserProfileResponse = AcknowledgedResponseBase + +export interface SecurityEnrollKibanaRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } +} + +export interface SecurityEnrollKibanaResponse { + token: SecurityEnrollKibanaToken + /** The CA certificate used to sign the node certificates that Elasticsearch uses for TLS on the HTTP layer. + * The certificate is returned as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ + http_ca: string +} + +export interface SecurityEnrollKibanaToken { + /** The name of the bearer token for the `elastic/kibana` service account. */ + name: string + /** The value of the bearer token for the `elastic/kibana` service account. + * Use this value to authenticate the service account with Elasticsearch. */ + value: string +} + +export interface SecurityEnrollNodeRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface SecurityEnrollNodeResponse { + /** The CA private key that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ + http_ca_key: string + /** The CA certificate that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ + http_ca_cert: string + /** The CA certificate that is used to sign the TLS certificate for the transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ + transport_ca_cert: string + /** The private key that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ + transport_key: string + /** The certificate that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ + transport_cert: string + /** A list of transport addresses in the form of `host:port` for the nodes that are already members of the cluster. */ + nodes_addresses: string[] +} + +export interface SecurityGetApiKeyRequest extends RequestBase { + /** An API key id. + * This parameter cannot be used with any of `name`, `realm_name` or `username`. */ + id?: Id + /** An API key name. + * This parameter cannot be used with any of `id`, `realm_name` or `username`. + * It supports prefix search with wildcard. */ + name?: Name + /** A boolean flag that can be used to query API keys owned by the currently authenticated user. + * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. */ + owner?: boolean + /** The name of an authentication realm. + * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ + realm_name?: Name + /** The username of a user. + * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ + username?: Username + /** Return the snapshot of the owner user's role descriptors + * associated with the API key. An API key's actual + * permission is the intersection of its assigned role + * descriptors and the owner user's role descriptors. */ + with_limited_by?: boolean + /** A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. 
You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. */ + active_only?: boolean + /** Determines whether to also retrieve the profile UID for the API key owner principal, if it exists. */ + with_profile_uid?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, name?: never, owner?: never, realm_name?: never, username?: never, with_limited_by?: never, active_only?: never, with_profile_uid?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, name?: never, owner?: never, realm_name?: never, username?: never, with_limited_by?: never, active_only?: never, with_profile_uid?: never } +} + +export interface SecurityGetApiKeyResponse { + api_keys: SecurityApiKey[] +} + +export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface SecurityGetBuiltinPrivilegesResponse { + /** The list of cluster privileges that are understood by this version of Elasticsearch. */ + cluster: SecurityClusterPrivilege[] + /** The list of index privileges that are understood by this version of Elasticsearch. */ + index: IndexName[] + /** The list of remote_cluster privileges that are understood by this version of Elasticsearch. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + remote_cluster: SecurityRemoteClusterPrivilege[] +} + +export interface SecurityGetPrivilegesRequest extends RequestBase { + /** The name of the application. + * Application privileges are always associated with exactly one application. + * If you do not specify this parameter, the API returns information about all privileges for all applications. */ + application?: Name + /** The name of the privilege. + * If you do not specify this parameter, the API returns information about all privileges for the requested application. */ + name?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { application?: never, name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { application?: never, name?: never } +} + +export type SecurityGetPrivilegesResponse = Record<string, Record<string, SecurityPutPrivilegesActions>> + +export interface SecurityGetRoleRequest extends RequestBase { + /** The name of the role. + * You can specify multiple roles as a comma-separated list. + * If you do not specify this parameter, the API returns information about all roles. */ + name?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } +} + +export type SecurityGetRoleResponse = Record<string, SecurityGetRoleRole> + +export interface SecurityGetRoleRole { + cluster: SecurityClusterPrivilege[] + indices: SecurityIndicesPrivileges[] + /** @remarks This property is not supported on Elastic Cloud Serverless.
*/ + remote_indices?: SecurityRemoteIndicesPrivileges[] + /** @remarks This property is not supported on Elastic Cloud Serverless. */ + remote_cluster?: SecurityRemoteClusterPrivileges[] + metadata: Metadata + description?: string + run_as?: string[] + transient_metadata?: Record<string, any> + applications: SecurityApplicationPrivileges[] + role_templates?: SecurityRoleTemplate[] + global?: Record<string, Record<string, Record<string, string[]>>> +} + +export interface SecurityGetRoleMappingRequest extends RequestBase { + /** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. */ + name?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never } +} + +export type SecurityGetRoleMappingResponse = Record<string, SecurityRoleMapping> + +export interface SecurityGetServiceAccountsRequest extends RequestBase { + /** The name of the namespace. + * Omit this parameter to retrieve information about all service accounts. + * If you omit this parameter, you must also omit the `service` parameter. */ + namespace?: Namespace + /** The service name. + * Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. */ + service?: Service + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { namespace?: never, service?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { namespace?: never, service?: never } +} + +export type SecurityGetServiceAccountsResponse = Record<string, SecurityGetServiceAccountsRoleDescriptorWrapper> + +export interface SecurityGetServiceAccountsRoleDescriptorWrapper { + role_descriptor: SecurityRoleDescriptorRead +} + +export interface SecurityGetServiceCredentialsNodesCredentials { + /** General status showing how nodes respond to the above collection request */ + _nodes: NodeStatistics + /** File-backed tokens collected from all nodes */ + file_tokens: Record<string, SecurityGetServiceCredentialsNodesCredentialsFileToken> +} + +export interface SecurityGetServiceCredentialsNodesCredentialsFileToken { + nodes: string[] +} + +export interface SecurityGetServiceCredentialsRequest extends RequestBase { + /** The name of the namespace. */ + namespace: Namespace + /** The service name. */ + service: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { namespace?: never, service?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { namespace?: never, service?: never } +} + +export interface SecurityGetServiceCredentialsResponse { + service_account: string + count: integer + tokens: Record<string, Metadata> + /** Service account credentials collected from all nodes of the cluster. */ + nodes_credentials: SecurityGetServiceCredentialsNodesCredentials +} + +export interface SecurityGetSettingsRequest extends RequestBase { + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body.
*/ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface SecurityGetSettingsResponse { + /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. */ + security: SecuritySecuritySettings + /** Settings for the index used to store profile information. */ + 'security-profile': SecuritySecuritySettings + /** Settings for the index used to store tokens. */ + 'security-tokens': SecuritySecuritySettings +} + +export interface SecurityGetStatsRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface SecurityGetStatsResponse { + /** A map of node IDs to security statistics for that node. */ + nodes: Record<string, SecurityNodeSecurityStats> +} + +export type SecurityGetTokenAccessTokenGrantType = 'password' | 'client_credentials' | '_kerberos' | 'refresh_token' + +export interface SecurityGetTokenAuthenticatedUser extends SecurityUser { + authentication_realm: SecurityGetTokenUserRealm + lookup_realm: SecurityGetTokenUserRealm + authentication_provider?: SecurityGetTokenAuthenticationProvider + authentication_type: string +} + +export interface SecurityGetTokenAuthenticationProvider { + type: string + name: Name +} + +export interface SecurityGetTokenRequest extends RequestBase { + /** The type of grant. + * Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. */ + grant_type?: SecurityGetTokenAccessTokenGrantType + /** The scope of the token. + * Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. */ + scope?: string + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ + password?: Password + /** The base64 encoded kerberos ticket. + * If you specify the `_kerberos` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ + kerberos_ticket?: string + /** The string that was returned when you created the token, which enables you to extend its life. + * If you specify the `refresh_token` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ + refresh_token?: string + /** The username that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ + username?: Username + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { grant_type?: never, scope?: never, password?: never, kerberos_ticket?: never, refresh_token?: never, username?: never } + /** All values in `querystring` will be added to the request querystring.
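+ * + * Usage sketch (editor's addition; assumes a configured Client as `client` and placeholder credentials): + * + *   const res = await client.security.getToken({ + *     grant_type: 'password', username: 'elastic', password: 'changeme' + *   }) + *   console.log(res.access_token, res.expires_in)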
*/ + querystring?: { [key: string]: any } & { grant_type?: never, scope?: never, password?: never, kerberos_ticket?: never, refresh_token?: never, username?: never } +} + +export interface SecurityGetTokenResponse { + access_token: string + expires_in: long + scope?: string + type: string + refresh_token?: string + kerberos_authentication_response_token?: string + authentication: SecurityGetTokenAuthenticatedUser +} + +export interface SecurityGetTokenUserRealm { + name: Name + type: string +} + +export interface SecurityGetUserRequest extends RequestBase { + /** An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. */ + username?: Username | Username[] + /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ + with_profile_uid?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, with_profile_uid?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, with_profile_uid?: never } +} + +export type SecurityGetUserResponse = Record<string, SecurityUser> + +export interface SecurityGetUserPrivilegesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export interface SecurityGetUserPrivilegesResponse { + applications: SecurityApplicationPrivileges[] + cluster: string[] + remote_cluster?: SecurityRemoteClusterPrivileges[] + global: SecurityGlobalPrivilege[] + indices: SecurityUserIndicesPrivileges[] + remote_indices?: SecurityRemoteUserIndicesPrivileges[] + run_as: string[] +} + +export interface SecurityGetUserProfileGetUserProfileErrors { + count: long + details: Record<SecurityUserProfileId, ErrorCause> +} + +export interface SecurityGetUserProfileRequest extends RequestBase { + /** A unique identifier for the user profile. */ + uid: SecurityUserProfileId | SecurityUserProfileId[] + /** A comma-separated list of filters for the `data` field of the profile document. + * To return all content use `data=*`. + * To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`. + * By default returns no `data` content. */ + data?: string | string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uid?: never, data?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uid?: never, data?: never } +} + +export interface SecurityGetUserProfileResponse { + /** A successful call returns the JSON representation of the user profile and its internal versioning numbers. + * The API returns an empty object if no profile document is found for the provided `uid`. + * The content of the data field is not returned by default to avoid deserializing a potentially large payload. */ + profiles: SecurityUserProfileWithMetadata[] + errors?: SecurityGetUserProfileGetUserProfileErrors +} + +export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' + +export interface SecurityGrantApiKeyGrantApiKey { + name: Name + /** Expiration time for the API key. By default, API keys never expire. */ + expiration?: DurationLarge + /** The role descriptors for this API key.
+ * When it is not specified or is an empty array, the API key has a point in time snapshot of permissions of the specified user or access token. + * If you supply role descriptors, the resultant permissions are an intersection of the API key's permissions and the permissions of the user or access token. */ + role_descriptors?: Record<string, SecurityRoleDescriptor> | Record<string, SecurityRoleDescriptor>[] + /** Arbitrary metadata that you want to associate with the API key. + * It supports a nested data structure. + * Within the `metadata` object, keys beginning with `_` are reserved for system usage. */ + metadata?: Metadata +} + +export interface SecurityGrantApiKeyRequest extends RequestBase { + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ + refresh?: Refresh + /** The API key. */ + api_key: SecurityGrantApiKeyGrantApiKey + /** The type of grant. Supported grant types are: `access_token`, `password`. */ + grant_type: SecurityGrantApiKeyApiKeyGrantType + /** The user's access token. + * If you specify the `access_token` grant type, this parameter is required. + * It is not valid with other grant types. */ + access_token?: string + /** The user name that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ + username?: Username + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ + password?: Password + /** The name of the user to be impersonated. */ + run_as?: Username + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, api_key?: never, grant_type?: never, access_token?: never, username?: never, password?: never, run_as?: never } +} + +export interface SecurityGrantApiKeyResponse { + api_key: string + id: Id + name: Name + expiration?: EpochTime<UnitMillis> + encoded: string +} + +export interface SecurityHasPrivilegesApplicationPrivilegesCheck { + /** The name of the application. */ + application: string + /** A list of the privileges that you want to check for the specified resources. + * It may be either application privilege names or the names of actions that are granted by those privileges */ + privileges: string[] + /** A list of resource names against which the privileges should be checked. */ + resources: string[] +} + +export type SecurityHasPrivilegesApplicationsPrivileges = Record<Name, SecurityHasPrivilegesResourcePrivileges> + +export interface SecurityHasPrivilegesIndexPrivilegesCheck { + /** A list of indices. */ + names: Indices + /** A list of the privileges that you want to check for the specified indices. */ + privileges: SecurityIndexPrivilege[] + /** This needs to be set to `true` (default is `false`) if using wildcards or regexps for patterns that cover restricted indices. + * Implicitly, restricted indices do not match index patterns because restricted indices usually have limited privileges and including them in pattern tests would render most such tests false.
+ * If restricted indices are explicitly included in the names list, privileges will be checked against them regardless of the value of `allow_restricted_indices`. */ + allow_restricted_indices?: boolean +} + +export type SecurityHasPrivilegesPrivileges = Record<string, boolean> + +export interface SecurityHasPrivilegesRequest extends RequestBase { + /** Username */ + user?: Name + application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + /** A list of the cluster privileges that you want to check. */ + cluster?: SecurityClusterPrivilege[] + index?: SecurityHasPrivilegesIndexPrivilegesCheck[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { user?: never, application?: never, cluster?: never, index?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { user?: never, application?: never, cluster?: never, index?: never } +} + +export type SecurityHasPrivilegesResourcePrivileges = Record<Name, SecurityHasPrivilegesPrivileges> + +export interface SecurityHasPrivilegesResponse { + application: SecurityHasPrivilegesApplicationsPrivileges + cluster: Record<string, boolean> + has_all_requested: boolean + index: Record<IndexName, SecurityHasPrivilegesResourcePrivileges> + username: Username +} + +export interface SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors { + count: long + details: Record<SecurityUserProfileId, ErrorCause> +} + +export interface SecurityHasPrivilegesUserProfilePrivilegesCheck { + application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + /** A list of the cluster privileges that you want to check. */ + cluster?: SecurityClusterPrivilege[] + index?: SecurityHasPrivilegesIndexPrivilegesCheck[] +} + +export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase { + /** A list of profile IDs. The privileges are checked for associated users of the profiles. */ + uids: SecurityUserProfileId[] + /** An object containing all the privileges to be checked. */ + privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uids?: never, privileges?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uids?: never, privileges?: never } +} + +export interface SecurityHasPrivilegesUserProfileResponse { + /** The subset of the requested profile IDs of the users that + * have all the requested privileges. */ + has_privilege_uids: SecurityUserProfileId[] + /** The subset of the requested profile IDs for which an error + * was encountered. It does not include the missing profile IDs + * or the profile IDs of the users that do not have all the + * requested privileges. This field is absent if empty. */ + errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors +} + +export interface SecurityInvalidateApiKeyRequest extends RequestBase { + id?: Id + /** A list of API key ids. + * This parameter cannot be used with any of `name`, `realm_name`, or `username`. */ + ids?: Id[] + /** An API key name. + * This parameter cannot be used with any of `ids`, `realm_name` or `username`. */ + name?: Name + /** Query API keys owned by the currently authenticated user. + * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. + * + * NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`.
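+ *
+ * Illustrative sketch (not part of the upstream spec): invalidating two API keys by ID
+ * with the JS client; the `client` instance and the key IDs are assumed placeholders.
+ *
+ * @example
+ * // import { Client } from '@elastic/elasticsearch'
+ * // const client = new Client({ node: 'https://localhost:9200' })
+ * const res = await client.security.invalidateApiKey({ ids: ['key-id-1', 'key-id-2'] })
+ * console.log(res.invalidated_api_keys, res.error_count)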
*/ + owner?: boolean + /** The name of an authentication realm. + * This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. */ + realm_name?: string + /** The username of a user. + * This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. */ + username?: Username + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, ids?: never, name?: never, owner?: never, realm_name?: never, username?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, ids?: never, name?: never, owner?: never, realm_name?: never, username?: never } +} + +export interface SecurityInvalidateApiKeyResponse { + /** The number of errors that were encountered when invalidating the API keys. */ + error_count: integer + /** Details about the errors. + * This field is not present in the response when `error_count` is `0`. */ + error_details?: ErrorCause[] + /** The IDs of the API keys that were invalidated as part of this request. */ + invalidated_api_keys: string[] + /** The IDs of the API keys that were already invalidated. */ + previously_invalidated_api_keys: string[] +} + +export interface SecurityInvalidateTokenRequest extends RequestBase { + /** An access token. + * This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ + token?: string + /** A refresh token. + * This parameter cannot be used if any of `token`, `realm_name`, or `username` are used. */ + refresh_token?: string + /** The name of an authentication realm. + * This parameter cannot be used with either `refresh_token` or `token`. */ + realm_name?: Name + /** The username of a user. + * This parameter cannot be used with either `refresh_token` or `token`. */ + username?: Username + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { token?: never, refresh_token?: never, realm_name?: never, username?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { token?: never, refresh_token?: never, realm_name?: never, username?: never } +} + +export interface SecurityInvalidateTokenResponse { + /** The number of errors that were encountered when invalidating the tokens. */ + error_count: long + /** Details about the errors. + * This field is not present in the response when `error_count` is `0`. */ + error_details?: ErrorCause[] + /** The number of the tokens that were invalidated as part of this request. */ + invalidated_tokens: long + /** The number of tokens that were already invalidated. */ + previously_invalidated_tokens: long +} + +export interface SecurityOidcAuthenticateRequest extends RequestBase { + /** Associate a client session with an ID token and mitigate replay attacks. + * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ + nonce: string + /** The name of the OpenID Connect realm. + * This property is useful in cases where multiple realms are defined. */ + realm?: string + /** The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication.
+ * This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. */ + redirect_uri: string + /** Maintain state between the authentication request and the response. + * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ + state: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { nonce?: never, realm?: never, redirect_uri?: never, state?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { nonce?: never, realm?: never, redirect_uri?: never, state?: never } +} + +export interface SecurityOidcAuthenticateResponse { + /** The Elasticsearch access token. */ + access_token: string + /** The duration (in seconds) of the tokens. */ + expires_in: integer + /** The Elasticsearch refresh token. */ + refresh_token: string + /** The type of token. */ + type: string +} + +export interface SecurityOidcLogoutRequest extends RequestBase { + /** The access token to be invalidated. */ + token: string + /** The refresh token to be invalidated. */ + refresh_token?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { token?: never, refresh_token?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { token?: never, refresh_token?: never } +} + +export interface SecurityOidcLogoutResponse { + /** A URI that points to the end session endpoint of the OpenID Connect Provider with all the parameters of the logout request as HTTP GET parameters. */ + redirect: string +} + +export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { + /** In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. + * It cannot be specified when *realm* is specified. + * One of *realm* or *iss* is required. */ + iss?: string + /** In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. + * This parameter is not valid when *realm* is specified. */ + login_hint?: string + /** The value used to associate a client session with an ID token and to mitigate replay attacks. + * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ + nonce?: string + /** The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. + * It cannot be specified when *iss* is specified. + * One of *realm* or *iss* is required. */ + realm?: string + /** The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. + * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ + state?: string + /** All values in `body` will be added to the request body. 
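+ *
+ * Illustrative sketch (not part of the upstream spec): starting an OIDC flow for a realm
+ * named `oidc1`; the `client` instance and realm name are assumed placeholders.
+ *
+ * @example
+ * const prep = await client.security.oidcPrepareAuthentication({ realm: 'oidc1' })
+ * // Redirect the user agent to `prep.redirect`; keep `prep.state` and `prep.nonce` for the callback.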
*/ + body?: string | { [key: string]: any } & { iss?: never, login_hint?: never, nonce?: never, realm?: never, state?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { iss?: never, login_hint?: never, nonce?: never, realm?: never, state?: never } +} + +export interface SecurityOidcPrepareAuthenticationResponse { + nonce: string + realm: string + /** A URI that points to the authorization endpoint of the OpenID Connect Provider with all the parameters of the authentication request as HTTP GET parameters. */ + redirect: string + state: string +} + +export interface SecurityPutPrivilegesActions { + actions: string[] + application?: string + name?: Name + metadata?: Metadata +} + +export interface SecurityPutPrivilegesRequest extends RequestBase { + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + privileges?: Record<string, Record<string, SecurityPutPrivilegesActions>> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { refresh?: never, privileges?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { refresh?: never, privileges?: never } +} + +export type SecurityPutPrivilegesResponse = Record<string, Record<string, SecurityCreatedStatus>> + +export interface SecurityPutRoleRequest extends RequestBase { + /** The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. */ + name: Name + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** A list of application privilege entries. */ + applications?: SecurityApplicationPrivileges[] + /** A list of cluster privileges. These privileges define the cluster-level actions for users with this role. */ + cluster?: SecurityClusterPrivilege[] + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + global?: Record<string, any> + /** A list of indices permissions entries. */ + indices?: SecurityIndicesPrivileges[] + /** A list of remote indices permissions entries. + * + * NOTE: Remote indices are effective for remote clusters configured with the API key based model. + * They have no effect for remote clusters configured with the certificate based model. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + remote_indices?: SecurityRemoteIndicesPrivileges[] + /** A list of remote cluster permissions entries. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + remote_cluster?: SecurityRemoteClusterPrivileges[] + /** Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. */ + metadata?: Metadata + /** A list of users that the owners of this role can impersonate.
*Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */ + run_as?: string[] + /** Optional description of the role descriptor */ + description?: string + /** Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. */ + transient_metadata?: Record<string, any> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, refresh?: never, applications?: never, cluster?: never, global?: never, indices?: never, remote_indices?: never, remote_cluster?: never, metadata?: never, run_as?: never, description?: never, transient_metadata?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, refresh?: never, applications?: never, cluster?: never, global?: never, indices?: never, remote_indices?: never, remote_cluster?: never, metadata?: never, run_as?: never, description?: never, transient_metadata?: never } +} + +export interface SecurityPutRoleResponse { + /** When an existing role is updated, `created` is set to `false`. */ + role: SecurityCreatedStatus +} + +export interface SecurityPutRoleMappingRequest extends RequestBase { + /** The distinct name that identifies the role mapping. + * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ + name: Name + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + refresh?: Refresh + /** Mappings that have `enabled` set to `false` are ignored when role mapping is performed. */ + enabled?: boolean + /** Additional metadata that helps define which roles are assigned to each user. + * Within the metadata object, keys beginning with `_` are reserved for system usage. */ + metadata?: Metadata + /** A list of role names that are granted to the users that match the role mapping rules. + * Exactly one of `roles` or `role_templates` must be specified. */ + roles?: string[] + /** A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. + * Exactly one of `roles` or `role_templates` must be specified. */ + role_templates?: SecurityRoleTemplate[] + /** The rules that determine which users should be matched by the mapping. + * A rule is a logical condition that is expressed by using a JSON DSL. */ + rules?: SecurityRoleMappingRule + run_as?: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, refresh?: never, enabled?: never, metadata?: never, roles?: never, role_templates?: never, rules?: never, run_as?: never } + /** All values in `querystring` will be added to the request querystring.
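+ *
+ * Illustrative sketch (not part of the upstream spec): mapping members of an LDAP group
+ * to the `superuser` role; the mapping name and group DN are assumed placeholders.
+ *
+ * @example
+ * await client.security.putRoleMapping({
+ *   name: 'admins-mapping',
+ *   enabled: true,
+ *   roles: ['superuser'],
+ *   rules: { field: { groups: 'cn=admins,dc=example,dc=com' } }
+ * })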
*/ + querystring?: { [key: string]: any } & { name?: never, refresh?: never, enabled?: never, metadata?: never, roles?: never, role_templates?: never, rules?: never, run_as?: never } +} + +export interface SecurityPutRoleMappingResponse { + created?: boolean + role_mapping: SecurityCreatedStatus +} + +export interface SecurityPutUserRequest extends RequestBase { + /** An identifier for the user. + * + * NOTE: Usernames must be at least 1 and no more than 507 characters. + * They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. + * Leading or trailing whitespace is not allowed. */ + username: Username + /** Valid values are `true`, `false`, and `wait_for`. + * These values have the same meaning as in the index API, but the default value for this API is true. */ + refresh?: Refresh + /** The email of the user. */ + email?: string | null + /** The full name of the user. */ + full_name?: string | null + /** Arbitrary metadata that you want to associate with the user. */ + metadata?: Metadata + /** The user's password. + * Passwords must be at least 6 characters long. + * When adding a user, one of `password` or `password_hash` is required. + * When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password */ + password?: Password + /** A hash of the user's password. + * This must be produced using the same hashing algorithm as has been configured for password storage. + * For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. + * Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. + * The `password` parameter and the `password_hash` parameter cannot be used in the same request. */ + password_hash?: string + /** A set of roles the user has. + * The roles determine the user's access permissions. + * To create a user without any roles, specify an empty list (`[]`). */ + roles?: string[] + /** Specifies whether the user is enabled. */ + enabled?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { username?: never, refresh?: never, email?: never, full_name?: never, metadata?: never, password?: never, password_hash?: never, roles?: never, enabled?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { username?: never, refresh?: never, email?: never, full_name?: never, metadata?: never, password?: never, password_hash?: never, roles?: never, enabled?: never } +} + +export interface SecurityPutUserResponse { + /** A successful call returns a JSON structure that shows whether the user has been created or updated. + * When an existing user is updated, `created` is set to `false`. 
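+ *
+ * Illustrative sketch (not part of the upstream spec): creating a user and checking
+ * whether it already existed; the username, password, and roles are assumed placeholders.
+ *
+ * @example
+ * const res = await client.security.putUser({
+ *   username: 'jdoe',
+ *   password: 'a-long-secure-password',
+ *   roles: ['viewer']
+ * })
+ * console.log(res.created ? 'created' : 'updated')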
*/ + created: boolean +} + +export type SecurityQueryApiKeysApiKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate + +export interface SecurityQueryApiKeysApiKeyAggregationContainer { + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. */ + aggregations?: Record<string, SecurityQueryApiKeysApiKeyAggregationContainer> + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. + * @alias aggregations */ + aggs?: Record<string, SecurityQueryApiKeysApiKeyAggregationContainer> + meta?: Metadata + /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ + cardinality?: AggregationsCardinalityAggregation + /** A multi-bucket aggregation that creates composite buckets from different sources. + * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ + composite?: AggregationsCompositeAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ + date_range?: AggregationsDateRangeAggregation + /** A single bucket aggregation that narrows the set of documents to those that match a query. */ + filter?: SecurityQueryApiKeysApiKeyQueryContainer + /** A multi-bucket aggregation where each bucket contains the documents that match a query. */ + filters?: SecurityQueryApiKeysApiKeyFiltersAggregation + missing?: AggregationsMissingAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ + range?: AggregationsRangeAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */ + terms?: AggregationsTermsAggregation + /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */ + value_count?: AggregationsValueCountAggregation +} + +export interface SecurityQueryApiKeysApiKeyFiltersAggregation extends AggregationsBucketAggregationBase { + /** Collection of queries from which to build buckets. */ + filters?: AggregationsBuckets<SecurityQueryApiKeysApiKeyQueryContainer> + /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ + other_bucket?: boolean + /** The key with which the other bucket is returned. */ + other_bucket_key?: string + /** By default, the named filters aggregation returns the buckets as an object. + * Set to `false` to return the buckets as an array of objects. */ + keyed?: boolean +} + +export interface SecurityQueryApiKeysApiKeyQueryContainer { + /** Matches documents matching boolean combinations of other queries. */ + bool?: QueryDslBoolQuery + /** Returns documents that contain an indexed value for a field. */ + exists?: QueryDslExistsQuery + /** Returns documents based on their IDs. + * This query uses document IDs stored in the `_id` field. */ + ids?: QueryDslIdsQuery + /** Returns documents that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching.
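+ *
+ * Illustrative sketch (not part of the upstream spec): using a `match` query to find
+ * API keys whose name contains `ingest`; the field value is an assumed placeholder.
+ *
+ * @example
+ * const res = await client.security.queryApiKeys({
+ *   query: { match: { name: 'ingest' } },
+ *   size: 50
+ * })
+ * console.log(res.total, res.api_keys.map(k => k.id))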
*/ + match?: Partial<Record<Field, QueryDslMatchQuery | string | float | boolean>> + /** Matches all documents, giving them all a `_score` of 1.0. */ + match_all?: QueryDslMatchAllQuery + /** Returns documents that contain a specific prefix in a provided field. */ + prefix?: Partial<Record<Field, QueryDslPrefixQuery | string>> + /** Returns documents that contain terms within a provided range. */ + range?: Partial<Record<Field, QueryDslRangeQuery>> + /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ + simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns documents that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ + term?: Partial<Record<Field, QueryDslTermQuery | FieldValue>> + /** Returns documents that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ + terms?: QueryDslTermsQuery + /** Returns documents that contain terms matching a wildcard pattern. */ + wildcard?: Partial<Record<Field, QueryDslWildcardQuery | string>> +} + +export interface SecurityQueryApiKeysRequest extends RequestBase { + /** Return the snapshot of the owner user's role descriptors associated with the API key. + * An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). + * An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. */ + with_limited_by?: boolean + /** Determines whether to also retrieve the profile UID for the API key owner principal. + * If it exists, the profile UID is returned under the `profile_uid` response field for each API key. */ + with_profile_uid?: boolean + /** Determines whether aggregation names are prefixed by their respective types in the response. */ + typed_keys?: boolean + /** Any aggregations to run over the corpus of returned API keys. + * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. + * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, + * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. + * Additionally, aggregations only run over the same subset of fields that query works with. */ + aggregations?: Record<string, SecurityQueryApiKeysApiKeyAggregationContainer> + /** Any aggregations to run over the corpus of returned API keys. + * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. + * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, + * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. + * Additionally, aggregations only run over the same subset of fields that query works with. + * @alias aggregations */ + aggs?: Record<string, SecurityQueryApiKeysApiKeyAggregationContainer> + /** A query to filter which API keys to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following public information associated with an API key: `id`, `type`, `name`, + * `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. + * + * NOTE: The queryable string values associated with API keys are internally mapped as keywords.
+ * Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. + * Such a match query is hence equivalent to a `term` query. */ + query?: SecurityQueryApiKeysApiKeyQueryContainer + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ + from?: integer + /** The sort definition. + * Other than `id`, all public fields of an API key are eligible for sorting. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ + sort?: Sort + /** The number of hits to return. + * It must not be negative. + * The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ + size?: integer + /** The search after definition. */ + search_after?: SortResults + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { with_limited_by?: never, with_profile_uid?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { with_limited_by?: never, with_profile_uid?: never, typed_keys?: never, aggregations?: never, aggs?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never } +} + +export interface SecurityQueryApiKeysResponse { + /** The total number of API keys found. */ + total: integer + /** The number of API keys returned in the response. */ + count: integer + /** A list of API key information. */ + api_keys: SecurityApiKey[] + /** The aggregations result, if requested. */ + aggregations?: Record<AggregateName, SecurityQueryApiKeysApiKeyAggregate> +} + +export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor { + _sort?: SortResults + /** Name of the role. */ + name: string +} + +export interface SecurityQueryRoleRequest extends RequestBase { + /** A query to filter which roles to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following information associated with roles: `name`, `description`, `metadata`, + * `applications.application`, `applications.privileges`, and `applications.resources`. */ + query?: SecurityQueryRoleRoleQueryContainer + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ + from?: integer + /** The sort definition. + * You can sort on `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, + * and `applications.resources`. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ + sort?: Sort + /** The number of hits to return. + * It must not be negative.
+ * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ + size?: integer + /** The search after definition. */ + search_after?: SortResults + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { query?: never, from?: never, sort?: never, size?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { query?: never, from?: never, sort?: never, size?: never, search_after?: never } +} + +export interface SecurityQueryRoleResponse { + /** The total number of roles found. */ + total: integer + /** The number of roles returned in the response. */ + count: integer + /** A list of roles that match the query. + * The returned role format is an extension of the role definition format. + * It adds the `transient_metadata.enabled` and the `_sort` fields. + * `transient_metadata.enabled` is set to `false` in case the role is automatically disabled, for example when the role grants privileges that are not allowed by the installed license. + * `_sort` is present when the search query sorts on some field. + * It contains the array of values that have been used for sorting. */ + roles: SecurityQueryRoleQueryRole[] +} + +export interface SecurityQueryRoleRoleQueryContainer { + /** Matches roles matching boolean combinations of other queries. */ + bool?: QueryDslBoolQuery + /** Returns roles that contain an indexed value for a field. */ + exists?: QueryDslExistsQuery + /** Returns roles based on their IDs. + * This query uses role document IDs stored in the `_id` field. */ + ids?: QueryDslIdsQuery + /** Returns roles that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ + match?: Partial<Record<Field, QueryDslMatchQuery | string | float | boolean>> + /** Matches all roles, giving them all a `_score` of 1.0. */ + match_all?: QueryDslMatchAllQuery + /** Returns roles that contain a specific prefix in a provided field. */ + prefix?: Partial<Record<Field, QueryDslPrefixQuery | string>> + /** Returns roles that contain terms within a provided range. */ + range?: Partial<Record<Field, QueryDslRangeQuery>> + /** Returns roles based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ + simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns roles that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ + term?: Partial<Record<Field, QueryDslTermQuery | FieldValue>> + /** Returns roles that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ + terms?: QueryDslTermsQuery + /** Returns roles that contain terms matching a wildcard pattern. */ + wildcard?: Partial<Record<Field, QueryDslWildcardQuery | string>> +} + +export interface SecurityQueryUserQueryUser extends SecurityUser { + _sort?: SortResults +} + +export interface SecurityQueryUserRequest extends RequestBase { + /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ + with_profile_uid?: boolean + /** A query to filter which users to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`.
+ * You can query the following information associated with users: `username`, `roles`, `enabled`, `full_name`, and `email`. */ + query?: SecurityQueryUserUserQueryContainer + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ + from?: integer + /** The sort definition. + * Fields eligible for sorting are: `username`, `roles`, `enabled`. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ + sort?: Sort + /** The number of hits to return. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ + size?: integer + /** The search after definition. */ + search_after?: SortResults + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { with_profile_uid?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { with_profile_uid?: never, query?: never, from?: never, sort?: never, size?: never, search_after?: never } +} + +export interface SecurityQueryUserResponse { + /** The total number of users found. */ + total: integer + /** The number of users returned in the response. */ + count: integer + /** A list of users that match the query. */ + users: SecurityQueryUserQueryUser[] +} + +export interface SecurityQueryUserUserQueryContainer { + /** Returns users based on their IDs. + * This query uses the user document IDs stored in the `_id` field. */ + ids?: QueryDslIdsQuery + /** Matches users matching boolean combinations of other queries. */ + bool?: QueryDslBoolQuery + /** Returns users that contain an indexed value for a field. */ + exists?: QueryDslExistsQuery + /** Returns users that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ + match?: Partial<Record<Field, QueryDslMatchQuery | string | float | boolean>> + /** Matches all users, giving them all a `_score` of 1.0. */ + match_all?: QueryDslMatchAllQuery + /** Returns users that contain a specific prefix in a provided field. */ + prefix?: Partial<Record<Field, QueryDslPrefixQuery | string>> + /** Returns users that contain terms within a provided range. */ + range?: Partial<Record<Field, QueryDslRangeQuery>> + /** Returns users based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ + simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns users that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ + term?: Partial<Record<Field, QueryDslTermQuery | FieldValue>> + /** Returns users that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ + terms?: QueryDslTermsQuery + /** Returns users that contain terms matching a wildcard pattern. */ + wildcard?: Partial<Record<Field, QueryDslWildcardQuery | string>> +} + +export interface SecuritySamlAuthenticateRequest extends RequestBase { + /** The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. */ + content: string + /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user.
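+ *
+ * Illustrative sketch (not part of the upstream spec): exchanging a SAML response for
+ * Elasticsearch tokens; `samlResponse` and the request ID are assumed inputs.
+ *
+ * @example
+ * const res = await client.security.samlAuthenticate({
+ *   content: samlResponse, // Base64-encoded SAMLResponse posted by the IdP
+ *   ids: ['request-id-from-prepare-call']
+ * })
+ * console.log(res.access_token, res.expires_in)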
*/ + ids: Ids + /** The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. */ + realm?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { content?: never, ids?: never, realm?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { content?: never, ids?: never, realm?: never } +} + +export interface SecuritySamlAuthenticateResponse { + /** The access token that was generated by Elasticsearch. */ + access_token: string + /** The authenticated user's name. */ + username: string + /** The amount of time (in seconds) left until the token expires. */ + expires_in: integer + /** The refresh token that was generated by Elasticsearch. */ + refresh_token: string + /** The name of the realm where the user was authenticated. */ + realm: string +} + +export interface SecuritySamlCompleteLogoutRequest extends RequestBase { + /** The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. */ + realm: string + /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */ + ids: Ids + /** If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. */ + query_string?: string + /** If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. */ + content?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { realm?: never, ids?: never, query_string?: never, content?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { realm?: never, ids?: never, query_string?: never, content?: never } +} + +export type SecuritySamlCompleteLogoutResponse = boolean + +export interface SecuritySamlInvalidateRequest extends RequestBase { + /** The Assertion Consumer Service URL that matches the one configured for the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. */ + acs?: string + /** The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. + * This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. + * If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. + * In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. + * The client application must not attempt to parse or process the string in any way. */ + query_string: string + /** The name of the SAML realm in Elasticsearch, the configuration of which should be used. You must specify either this parameter or the `acs` parameter. */ + realm?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acs?: never, query_string?: never, realm?: never } + /** All values in `querystring` will be added to the request querystring.
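+ *
+ * Illustrative sketch (not part of the upstream spec): handling an IdP-initiated Single
+ * Logout; `idpQueryString` is the assumed, unparsed query string from the browser redirect.
+ *
+ * @example
+ * const res = await client.security.samlInvalidate({
+ *   realm: 'saml1',
+ *   query_string: idpQueryString
+ * })
+ * // Redirect the user agent to `res.redirect` to complete the logout on the IdP side.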
*/ + querystring?: { [key: string]: any } & { acs?: never, query_string?: never, realm?: never } +} + +export interface SecuritySamlInvalidateResponse { + /** The number of tokens that were invalidated as part of this logout. */ + invalidated: integer + /** The realm name of the SAML realm in Elasticsearch that authenticated the user. */ + realm: string + /** A SAML logout response as a parameter so that the user can be redirected back to the SAML IdP. */ + redirect: string +} + +export interface SecuritySamlLogoutRequest extends RequestBase { + /** The access token that was returned as a response to calling the SAML authenticate API. + * Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. */ + token: string + /** The refresh token that was returned as a response to calling the SAML authenticate API. + * Alternatively, the most recent refresh token that was received after refreshing the original access token. */ + refresh_token?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { token?: never, refresh_token?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { token?: never, refresh_token?: never } +} + +export interface SecuritySamlLogoutResponse { + /** A URL that contains a SAML logout request as a parameter. + * You can use this URL to be redirected back to the SAML IdP and to initiate Single Logout. */ + redirect: string +} + +export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase { + /** The Assertion Consumer Service URL that matches one of the SAML realms in Elasticsearch. + * The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. */ + acs?: string + /** The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. + * You must specify either this parameter or the `acs` parameter. */ + realm?: string + /** A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. + * If the Authentication Request is signed, this value is used as part of the signature computation. */ + relay_state?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { acs?: never, realm?: never, relay_state?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { acs?: never, realm?: never, relay_state?: never } +} + +export interface SecuritySamlPrepareAuthenticationResponse { + /** A unique identifier for the SAML Request to be stored by the caller of the API. */ + id: Id + /** The name of the Elasticsearch realm that was used to construct the authentication request. */ + realm: string + /** The URL to redirect the user to. */ + redirect: string +} + +export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase { + /** The name of the SAML realm in Elasticsearch. */ + realm_name: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { realm_name?: never } + /** All values in `querystring` will be added to the request querystring.
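+ *
+ * Illustrative sketch (not part of the upstream spec): exporting Service Provider metadata
+ * for a realm named `saml1` so it can be imported into the IdP; the realm name is assumed.
+ *
+ * @example
+ * const res = await client.security.samlServiceProviderMetadata({ realm_name: 'saml1' })
+ * console.log(res.metadata) // XML string describing the Service Provider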
*/ + querystring?: { [key: string]: any } & { realm_name?: never } +} + +export interface SecuritySamlServiceProviderMetadataResponse { + /** An XML string that contains a SAML Service Provider's metadata for the realm. */ + metadata: string +} + +export interface SecuritySuggestUserProfilesHint { + /** A list of profile UIDs to match against. */ + uids?: SecurityUserProfileId[] + /** A single key-value pair to match against the labels section + * of a profile. A profile is considered matching if it matches + * at least one of the strings. */ + labels?: Record<string, string | string[]> +} + +export interface SecuritySuggestUserProfilesRequest extends RequestBase { + /** A query string used to match name-related fields in user profile documents. + * Name-related fields are the user's `username`, `full_name`, and `email`. */ + name?: string + /** The number of profiles to return. */ + size?: long + /** A comma-separated list of filters for the `data` field of the profile document. + * To return all content use `data=*`. + * To return a subset of content, use `data=<key>` to retrieve content nested under the specified `<key>`. + * By default, the API returns no `data` content. + * It is an error to specify `data` as both the query parameter and the request body field. */ + data?: string | string[] + /** Extra search criteria to improve relevance of the suggestion result. + * Profiles matching the specified hint are ranked higher in the response. + * Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. */ + hint?: SecuritySuggestUserProfilesHint + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, size?: never, data?: never, hint?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, size?: never, data?: never, hint?: never } +} + +export interface SecuritySuggestUserProfilesResponse { + /** Metadata about the number of matching profiles. */ + total: SecuritySuggestUserProfilesTotalUserProfiles + /** The number of milliseconds it took Elasticsearch to run the request. */ + took: long + /** A list of profile documents, ordered by relevance, that match the search criteria. */ + profiles: SecurityUserProfile[] +} + +export interface SecuritySuggestUserProfilesTotalUserProfiles { + value: long + relation: RelationName +} + +export interface SecurityUpdateApiKeyRequest extends RequestBase { + /** The ID of the API key to update. */ + id: Id + /** The role descriptors to assign to this API key. + * The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. + * You can assign new privileges by specifying them in this parameter. + * To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. + * If an API key has no assigned privileges, it inherits the owner user's full permissions. + * The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. + * The structure of a role descriptor is the same as the request for the create API keys API. */ + role_descriptors?: Record<string, SecurityRoleDescriptor> + /** Arbitrary metadata that you want to associate with the API key. + * It supports a nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage.
+ * When specified, this value fully replaces the metadata previously associated with the API key. */ + metadata?: Metadata + /** The expiration time for the API key. + * By default, API keys never expire. + * This property can be omitted to leave the expiration unchanged. */ + expiration?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, role_descriptors?: never, metadata?: never, expiration?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, role_descriptors?: never, metadata?: never, expiration?: never } +} + +export interface SecurityUpdateApiKeyResponse { + /** If `true`, the API key was updated. + * If `false`, the API key didn't change because no change was detected. */ + updated: boolean +} + +export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { + /** The ID of the cross-cluster API key to update. */ + id: Id + /** The access to be granted to this API key. + * The access is composed of permissions for cross cluster search and cross cluster replication. + * At least one of them must be specified. + * When specified, the new access assignment fully replaces the previously assigned access. */ + access: SecurityAccess + /** The expiration time for the API key. + * By default, API keys never expire. This property can be omitted to leave the value unchanged. */ + expiration?: Duration + /** Arbitrary metadata that you want to associate with the API key. + * It supports a nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. + * When specified, this information fully replaces metadata previously associated with the API key. */ + metadata?: Metadata + /** The certificate identity to associate with this API key. + * This field is used to restrict the API key to connections authenticated by a specific TLS certificate. + * The value should match the certificate's distinguished name (DN) pattern. + * When specified, this fully replaces any previously assigned certificate identity. + * To clear an existing certificate identity, explicitly set this field to `null`. + * When omitted, the existing certificate identity remains unchanged. */ + certificate_identity?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never, certificate_identity?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never, certificate_identity?: never } +} + +export interface SecurityUpdateCrossClusterApiKeyResponse { + /** If `true`, the API key was updated. + * If `false`, the API key didn’t change because no change was detected. */ + updated: boolean +} + +export interface SecurityUpdateSettingsRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** Settings for the index used for most security configuration, including native realm users and roles configured with the API.
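+ *
+ * Illustrative sketch (not part of the upstream spec): tuning replica expansion for the
+ * main security index; the exact setting shown is an assumed example, not a recommendation.
+ *
+ * @example
+ * const res = await client.security.updateSettings({
+ *   security: { 'index.auto_expand_replicas': '0-all' }
+ * })
+ * console.log(res.acknowledged)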
*/ + security?: SecuritySecuritySettings + /** Settings for the index used to store profile information. */ + 'security-profile'?: SecuritySecuritySettings + /** Settings for the index used to store tokens. */ + 'security-tokens'?: SecuritySecuritySettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never, security?: never, 'security-profile'?: never, 'security-tokens'?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never, security?: never, 'security-profile'?: never, 'security-tokens'?: never } +} + +export interface SecurityUpdateSettingsResponse { + acknowledged: boolean +} + +export interface SecurityUpdateUserProfileDataRequest extends RequestBase { + /** A unique identifier for the user profile. */ + uid: SecurityUserProfileId + /** Only perform the operation if the document has this sequence number. */ + if_seq_no?: SequenceNumber + /** Only perform the operation if the document has this primary term. */ + if_primary_term?: long + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ + refresh?: Refresh + /** Searchable data that you want to associate with the user profile. + * This field supports a nested data structure. + * Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). */ + labels?: Record<string, any> + /** Non-searchable data that you want to associate with the user profile. + * This field supports a nested data structure. + * Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). + * The data object is not searchable, but can be retrieved with the get user profile API. */ + data?: Record<string, any> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { uid?: never, if_seq_no?: never, if_primary_term?: never, refresh?: never, labels?: never, data?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { uid?: never, if_seq_no?: never, if_primary_term?: never, refresh?: never, labels?: never, data?: never } +} + +export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase + +export type ShutdownType = 'restart' | 'remove' | 'replace' + +export interface ShutdownDeleteNodeRequest extends RequestBase { + /** The node ID of the node to be removed from the shutdown state */ + node_id: NodeId + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring.
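+ *
+ * Illustrative sketch (not part of the upstream spec): clearing a node's shutdown record
+ * once maintenance is finished; the node ID is an assumed placeholder.
+ *
+ * @example
+ * const res = await client.shutdown.deleteNode({ node_id: 'node-1' })
+ * console.log(res.acknowledged)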
*/ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } +} + +export type ShutdownDeleteNodeResponse = AcknowledgedResponseBase + +export interface ShutdownGetNodeNodeShutdownStatus { + node_id: NodeId + type: ShutdownGetNodeShutdownType + reason: string + shutdown_startedmillis: EpochTime<UnitMillis> + status: ShutdownGetNodeShutdownStatus + shard_migration: ShutdownGetNodeShardMigrationStatus + persistent_tasks: ShutdownGetNodePersistentTaskStatus + plugins: ShutdownGetNodePluginsStatus +} + +export interface ShutdownGetNodePersistentTaskStatus { + status: ShutdownGetNodeShutdownStatus +} + +export interface ShutdownGetNodePluginsStatus { + status: ShutdownGetNodeShutdownStatus +} + +export interface ShutdownGetNodeRequest extends RequestBase { + /** The node or nodes for which to retrieve the shutdown status */ + node_id?: NodeIds + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never } +} + +export interface ShutdownGetNodeResponse { + nodes: ShutdownGetNodeNodeShutdownStatus[] +} + +export interface ShutdownGetNodeShardMigrationStatus { + status: ShutdownGetNodeShutdownStatus +} + +export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'stalled' | 'complete' + +export type ShutdownGetNodeShutdownType = 'remove' | 'restart' + +export interface ShutdownPutNodeRequest extends RequestBase { + /** The node identifier. + * This parameter is not validated against the cluster's active nodes. + * This enables you to register a node for shut down while it is offline. + * No error is thrown if you specify an invalid node ID. */ + node_id: NodeId + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** Valid values are restart, remove, or replace. + * Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. + * Because the node is expected to rejoin the cluster, data is not migrated off of the node. + * Use remove when you need to permanently remove a node from the cluster. + * The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node. + * Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. + * During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. */ + type: ShutdownType + /** A human-readable reason that the node is being shut down. + * This field provides information for other cluster operators; it does not affect the shut down process. */ + reason: string + /** Only valid if type is restart.
+ * Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. + * This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. + * If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. */ + allocation_delay?: string + /** Only valid if type is replace. + * Specifies the name of the node that is replacing the node being shut down. + * Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. + * During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. */ + target_node_name?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never, type?: never, reason?: never, allocation_delay?: never, target_node_name?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never, type?: never, reason?: never, allocation_delay?: never, target_node_name?: never } +} + +export type ShutdownPutNodeResponse = AcknowledgedResponseBase
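+
+// A minimal usage sketch (not part of the generated types): registering a
+// restart-type shutdown with the request shape above. It assumes `client` is
+// an instantiated `Client` from this package; the node id is a placeholder.
+//
+//   const resp: ShutdownPutNodeResponse = await client.shutdown.putNode({
+//     node_id: 'node-1',                 // hypothetical node id
+//     type: 'restart',                   // node is expected to rejoin
+//     reason: 'scheduled maintenance',
+//     allocation_delay: '10m'            // only valid for restart shutdowns
+//   })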
+ +export interface SimulateIngestIngestDocumentSimulationKeys { + /** Identifier for the document. */ + _id: Id + /** Name of the index that the document would be indexed into if this were not a simulation. */ + _index: IndexName + /** JSON body for the document. */ + _source: Record<string, any> + _version: SpecUtilsStringified<VersionNumber> + /** A list of the names of the pipelines executed on this document. */ + executed_pipelines: string[] + /** A list of the fields that would be ignored at the indexing step. For example, a field whose + * value is larger than the allowed limit would make it through all of the pipelines, but + * would not be indexed into Elasticsearch. */ + ignored_fields?: Record<string, string>[] + /** Any error resulting from simulating ingest on this doc. This can be an error generated by + * executing a processor, or a mapping validation error when simulating indexing the resulting + * doc. */ + error?: ErrorCause + effective_mapping?: MappingTypeMapping +} +export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys +& { [property: string]: string | Id | IndexName | Record<string, any> | SpecUtilsStringified<VersionNumber> | string[] | Record<string, string>[] | ErrorCause | MappingTypeMapping } + +export type SimulateIngestMergeType = 'index' | 'template' + +export interface SimulateIngestRequest extends RequestBase { + /** The index to simulate ingesting into. + * This value can be overridden by specifying an index on each document. + * If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. */ + index?: IndexName + /** The pipeline to use as the default pipeline. + * This value can be used to override the default pipeline of the index. */ + pipeline?: PipelineName + /** The mapping merge type if mapping overrides are being provided in mapping_addition. + * The allowed values are one of index or template. + * The index option merges mappings the way they would be merged into an existing index. + * The template option merges mappings the way they would be merged into a template. */ + merge_type?: SimulateIngestMergeType + /** Sample documents to test in the pipeline. */ + docs: IngestDocument[] + /** A map of component template names to substitute component template definition objects. */ + component_template_substitutions?: Record<string, ClusterComponentTemplateNode> + /** A map of index template names to substitute index template definition objects. */ + index_template_substitutions?: Record<string, IndicesIndexTemplate> + mapping_addition?: MappingTypeMapping + /** Pipelines to test. + * If you don’t specify the `pipeline` request path parameter, this parameter is required. + * If you specify both this and the request path parameter, the API only uses the request path parameter. */ + pipeline_substitutions?: Record<string, IngestPipeline> + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, pipeline?: never, merge_type?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, pipeline?: never, merge_type?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } +} + +export interface SimulateIngestResponse { + docs: SimulateIngestSimulateIngestDocumentResult[] +} + +export interface SimulateIngestSimulateIngestDocumentResult { + doc?: SimulateIngestIngestDocumentSimulation +}
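+
+// A minimal usage sketch (not part of the generated types): testing a pipeline
+// substitution against sample documents. It assumes `client` is an
+// instantiated `Client` from this package; index and pipeline names are
+// placeholders.
+//
+//   const resp: SimulateIngestResponse = await client.simulate.ingest({
+//     index: 'my-index',
+//     docs: [{ _id: '1', _source: { foo: 'bar' } }],
+//     pipeline_substitutions: {
+//       'my-pipeline': {
+//         processors: [{ set: { field: 'foo', value: 'baz' } }]
+//       }
+//     }
+//   })
+//   for (const result of resp.docs) console.log(result.doc?._source)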
+ +export interface SlmConfiguration { + /** If false, the snapshot fails if any data stream or index in indices is missing or closed. If true, the snapshot ignores missing or closed data streams and indices. */ + ignore_unavailable?: boolean + /** A comma-separated list of data streams and indices to include in the snapshot. Multi-index syntax is supported. + * By default, a snapshot includes all data streams and indices in the cluster. If this argument is provided, the snapshot only includes the specified data streams and indices. */ + indices?: Indices + /** If true, the current global state is included in the snapshot. */ + include_global_state?: boolean + /** A list of feature states to be included in this snapshot. A list of features available for inclusion in the snapshot and their descriptions can be retrieved using the get features API. + * Each feature state includes one or more system indices containing data necessary for the function of that feature. Providing an empty array will include no feature states in the snapshot, regardless of the value of include_global_state. By default, all available feature states will be included in the snapshot if include_global_state is true, or no feature states if include_global_state is false. */ + feature_states?: string[] + /** Attaches arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. Metadata must be less than 1024 bytes. */ + metadata?: Metadata + /** If false, the entire snapshot will fail if one or more indices included in the snapshot do not have all primary shards available. */ + partial?: boolean +} + +export interface SlmInProgress { + name: Name + start_time_millis: EpochTime<UnitMillis> + state: string + uuid: Uuid +} + +export interface SlmInvocation { + snapshot_name: Name + time: DateTime +} + +export interface SlmPolicy { + config?: SlmConfiguration + name: Name + repository: string + retention?: SlmRetention + schedule: WatcherCronExpression +} + +export interface SlmRetention { + /** Time period after which a snapshot is considered expired and eligible for deletion. SLM deletes expired snapshots based on the slm.retention_schedule. */ + expire_after: Duration + /** Maximum number of snapshots to retain, even if the snapshots have not yet expired. If the number of snapshots in the repository exceeds this limit, the policy retains the most recent snapshots and deletes older snapshots. */ + max_count: integer + /** Minimum number of snapshots to retain, even if the snapshots have expired. */ + min_count: integer +} + +export interface SlmSnapshotLifecycle { + in_progress?: SlmInProgress + last_failure?: SlmInvocation + last_success?: SlmInvocation + /** The last time the policy was modified. */ + modified_date?: DateTime + modified_date_millis: EpochTime<UnitMillis> + /** The next time the policy will run. */ + next_execution?: DateTime + next_execution_millis: EpochTime<UnitMillis> + policy: SlmPolicy + /** The version of the snapshot policy. + * Only the latest version is stored and incremented when the policy is updated. */ + version: VersionNumber + stats: SlmStatistics +} + +export interface SlmSnapshotPolicyStats { + policy: string + snapshots_taken: long + snapshots_failed: long + snapshots_deleted: long + snapshot_deletion_failures: long +} + +export interface SlmStatistics { + retention_deletion_time?: Duration + retention_deletion_time_millis?: DurationValue<UnitMillis> + retention_failed?: long + retention_runs?: long + retention_timed_out?: long + policy?: Id + total_snapshots_deleted?: long + /** @alias total_snapshots_deleted */ + snapshots_deleted?: long + total_snapshot_deletion_failures?: long + /** @alias total_snapshot_deletion_failures */ + snapshot_deletion_failures?: long + total_snapshots_failed?: long + /** @alias total_snapshots_failed */ + snapshots_failed?: long + total_snapshots_taken?: long + /** @alias total_snapshots_taken */ + snapshots_taken?: long +} + +export interface SlmDeleteLifecycleRequest extends RequestBase { + /** The id of the snapshot lifecycle policy to remove */ + policy_id: Name + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } +} + +export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase + +export interface SlmExecuteLifecycleRequest extends RequestBase { + /** The id of the snapshot lifecycle policy to be executed */ + policy_id: Name + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error.
*/ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } +} + +export interface SlmExecuteLifecycleResponse { + snapshot_name: Name +} + +export interface SlmExecuteRetentionRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export type SlmExecuteRetentionResponse = AcknowledgedResponseBase + +export interface SlmGetLifecycleRequest extends RequestBase { + /** Comma-separated list of snapshot lifecycle policies to retrieve */ + policy_id?: Names + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } +} + +export type SlmGetLifecycleResponse = Record<Id, SlmSnapshotLifecycle>
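+
+// A minimal usage sketch (not part of the generated types): the get-lifecycle
+// response is a map keyed by policy id. It assumes `client` is an
+// instantiated `Client` from this package.
+//
+//   const policies: SlmGetLifecycleResponse = await client.slm.getLifecycle()
+//   for (const [id, lifecycle] of Object.entries(policies)) {
+//     console.log(id, lifecycle.policy.schedule, lifecycle.next_execution)
+//   }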
+ +export interface SlmGetStatsRequest extends RequestBase { + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export interface SlmGetStatsResponse { + retention_deletion_time: Duration + retention_deletion_time_millis: DurationValue<UnitMillis> + retention_failed: long + retention_runs: long + retention_timed_out: long + total_snapshots_deleted: long + total_snapshot_deletion_failures: long + total_snapshots_failed: long + total_snapshots_taken: long + policy_stats: SlmSnapshotPolicyStats[] +} + +export interface SlmGetStatusRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export interface SlmGetStatusResponse { + operation_mode: LifecycleOperationMode +} + +export interface SlmPutLifecycleRequest extends RequestBase { + /** The identifier for the snapshot lifecycle policy you want to create or update. */ + policy_id: Name + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + timeout?: Duration + /** Configuration for each snapshot created by the policy. */ + config?: SlmConfiguration + /** Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. */ + name?: Name + /** Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. */ + repository?: string + /** Retention rules used to retain and delete snapshots created by the policy. */ + retention?: SlmRetention + /** Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. */ + schedule?: WatcherCronExpression + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never, config?: never, name?: never, repository?: never, retention?: never, schedule?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never, config?: never, name?: never, repository?: never, retention?: never, schedule?: never } +} + +export type SlmPutLifecycleResponse = AcknowledgedResponseBase
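+
+// A minimal usage sketch (not part of the generated types): creating a nightly
+// policy with the request shape above. It assumes `client` is an instantiated
+// `Client` from this package; policy and repository names are placeholders.
+//
+//   const resp: SlmPutLifecycleResponse = await client.slm.putLifecycle({
+//     policy_id: 'nightly-snapshots',
+//     schedule: '0 30 1 * * ?',            // 01:30 every day (cron)
+//     name: '<nightly-snap-{now/d}>',      // date math in the snapshot name
+//     repository: 'my_repository',
+//     config: { indices: 'logs-*', include_global_state: false },
+//     retention: { expire_after: '30d', min_count: 5, max_count: 50 }
+//   })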
+ +export interface SlmStartRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export type SlmStartResponse = AcknowledgedResponseBase + +export interface SlmStopRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export type SlmStopResponse = AcknowledgedResponseBase + +export interface SnapshotAzureRepository extends SnapshotRepositoryBase { + /** The Azure repository type. */ + type: 'azure' + /** The repository settings. */ + settings?: SnapshotAzureRepositorySettings +} + +export interface SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase { + /** The path to the repository data within the container. + * It defaults to the root directory. + * + * NOTE: Don't set `base_path` when configuring a snapshot repository for Elastic Cloud Enterprise. + * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments can share the same bucket. */ + base_path?: string + /** The name of the Azure repository client to use. */ + client?: string + /** The Azure container. */ + container?: string + /** The maximum batch size, between 1 and 256, used for `BlobBatch` requests. + * Defaults to 256 which is the maximum number supported by the Azure blob batch API. */ + delete_objects_max_size?: integer + /** Either `primary_only` or `secondary_only`. + * Note that if you set it to `secondary_only`, it will force `readonly` to `true`. */ + location_mode?: string + /** The maximum number of concurrent batch delete requests that will be submitted for any individual bulk delete with `BlobBatch`. + * Note that the effective number of concurrent deletes is further limited by the Azure client connection and event loop thread limits. + * Defaults to 10, minimum is 1, maximum is 100. */ + max_concurrent_batch_deletes?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ + readonly?: boolean +} + +export interface SnapshotFileCountSnapshotStats { + file_count: integer + size_in_bytes: long +} + +export interface SnapshotGcsRepository extends SnapshotRepositoryBase { + /** The Google Cloud Storage repository type. */ + type: 'gcs' + /** The repository settings. */ + settings: SnapshotGcsRepositorySettings +} + +export interface SnapshotGcsRepositorySettings extends SnapshotRepositorySettingsBase { + /** The name of the bucket to be used for snapshots. */ + bucket: string + /** The name used by the client when it uses the Google Cloud Storage service. */ + application_name?: string + /** The path to the repository data within the bucket. + * It defaults to the root of the bucket. + * + * NOTE: Don't set `base_path` when configuring a snapshot repository for Elastic Cloud Enterprise. + * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments can share the same bucket. */ + base_path?: string + /** The name of the client to use to connect to Google Cloud Storage. */ + client?: string + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ + readonly?: boolean +} + +export interface SnapshotIndexDetails { + shard_count: integer + size?: ByteSize + size_in_bytes: long + max_segments_per_shard: long +} + +export interface SnapshotInfoFeatureState { + feature_name: string + indices: Indices +} + +export interface SnapshotReadOnlyUrlRepository extends SnapshotRepositoryBase { + /** The read-only URL repository type. */ + type: 'url' + /** The repository settings. */ + settings: SnapshotReadOnlyUrlRepositorySettings +} + +export interface SnapshotReadOnlyUrlRepositorySettings extends SnapshotRepositorySettingsBase { + /** The maximum number of retries for HTTP and HTTPS URLs. */ + http_max_retries?: integer + /** The maximum wait time for data transfers over a connection.
*/ + http_socket_timeout?: Duration + /** The maximum number of snapshots the repository can contain. + * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ + max_number_of_snapshots?: integer + /** The URL location of the root of the shared filesystem repository. + * The following protocols are supported: + * + * * `file` + * * `ftp` + * * `http` + * * `https` + * * `jar` + * + * URLs using the HTTP, HTTPS, or FTP protocols must be explicitly allowed with the `repositories.url.allowed_urls` cluster setting. + * This setting supports wildcards in the place of a host, path, query, or fragment in the URL. + * + * URLs using the file protocol must point to the location of a shared filesystem accessible to all master and data nodes in the cluster. + * This location must be registered in the `path.repo` setting. + * You don't need to register URLs using the FTP, HTTP, HTTPS, or JAR protocols in the `path.repo` setting. */ + url: string +} + +export type SnapshotRepository = SnapshotAzureRepository | SnapshotGcsRepository | SnapshotS3Repository | SnapshotSharedFileSystemRepository | SnapshotReadOnlyUrlRepository | SnapshotSourceOnlyRepository + +export interface SnapshotRepositoryBase { + uuid?: Uuid +} + +export interface SnapshotRepositorySettingsBase { + /** Big files can be broken down into multiple smaller blobs in the blob store during snapshotting. + * It is not recommended to change this value from its default unless there is an explicit reason for limiting the size of blobs in the repository. + * Setting a value lower than the default can result in an increased number of API calls to the blob store during snapshot create and restore operations compared to using the default value and thus make both operations slower and more costly. + * Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, `500B`. + * The default varies by repository type. */ + chunk_size?: ByteSize + /** When set to `true`, metadata files are stored in compressed format. + * This setting doesn't affect index files that are already compressed by default. */ + compress?: boolean + /** The maximum snapshot restore rate per node. + * It defaults to unlimited. + * Note that restores are also throttled through recovery settings. */ + max_restore_bytes_per_sec?: ByteSize + /** The maximum snapshot creation rate per node. + * It defaults to 40mb per second. + * Note that if the recovery settings for managed services are set, then it defaults to unlimited, and the rate is additionally throttled through recovery settings. */ + max_snapshot_bytes_per_sec?: ByteSize +} + +export interface SnapshotS3Repository extends SnapshotRepositoryBase { + /** The S3 repository type. */ + type: 's3' + /** The repository settings. + * + * NOTE: In addition to the specified settings, you can also use all non-secure client settings in the repository settings. + * In this case, the client settings found in the repository settings will be merged with those of the named client used by the repository. + * Conflicts between client and repository settings are resolved by the repository settings taking precedence over client settings. */ + settings: SnapshotS3RepositorySettings +} + +export interface SnapshotS3RepositorySettings extends SnapshotRepositorySettingsBase { + /** The name of the S3 bucket to use for snapshots. + * The bucket name must adhere to Amazon's S3 bucket naming rules. */ + bucket: string + /** The path to the repository data within its bucket.
+ * It defaults to an empty string, meaning that the repository is at the root of the bucket. + * The value of this setting should not start or end with a forward slash (`/`). + * + * NOTE: Don't set base_path when configuring a snapshot repository for Elastic Cloud Enterprise. + * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments may share the same bucket. */ + base_path?: string + /** The minimum threshold below which the chunk is uploaded using a single request. + * Beyond this threshold, the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of `buffer_size` length, and to upload each part in its own request. + * Note that setting a buffer size lower than 5mb is not allowed since it will prevent the use of the Multipart API and may result in upload errors. + * It is also not possible to set a buffer size greater than 5gb as it is the maximum upload size allowed by S3. + * Defaults to `100mb` or 5% of JVM heap, whichever is smaller. */ + buffer_size?: ByteSize + /** The S3 repository supports all S3 canned ACLs: `private`, `public-read`, `public-read-write`, `authenticated-read`, `log-delivery-write`, `bucket-owner-read`, `bucket-owner-full-control`. + * You can specify a canned ACL using the `canned_acl` setting. + * When the S3 repository creates buckets and objects, it adds the canned ACL into the buckets and objects. */ + canned_acl?: string + /** The name of the S3 client to use to connect to S3. */ + client?: string + /** The maximum batch size, between 1 and 1000, used for `DeleteObjects` requests. + * Defaults to 1000 which is the maximum number supported by the AWS DeleteObjects API. */ + delete_objects_max_size?: integer + /** The time to wait before trying again if an attempt to read a linearizable register fails. */ + get_register_retry_delay?: Duration + /** The maximum number of parts that Elasticsearch will write during a multipart upload of a single object. + * Files which are larger than `buffer_size × max_multipart_parts` will be chunked into several smaller objects. + * Elasticsearch may also split a file across multiple objects to satisfy other constraints such as the `chunk_size` limit. + * Defaults to `10000` which is the maximum number of parts in a multipart upload in AWS S3. */ + max_multipart_parts?: integer + /** The maximum number of possibly-dangling multipart uploads to clean up in each batch of snapshot deletions. + * Defaults to 1000 which is the maximum number supported by the AWS ListMultipartUploads API. + * If set to `0`, Elasticsearch will not attempt to clean up dangling multipart uploads. */ + max_multipart_upload_cleanup_size?: integer + /** If true, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository.
*/ + readonly?: boolean + /** When set to `true`, files are encrypted on server side using an AES256 algorithm. */ + server_side_encryption?: boolean + /** The S3 storage class for objects written to the repository. + * Values may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia`, and `intelligent_tiering`. */ + storage_class?: string + /** The delay before the first retry and the amount the delay is incremented by on each subsequent retry. + * The default is 50ms and the minimum is 0ms. */ + 'throttled_delete_retry.delay_increment'?: Duration + /** The upper bound on how long the delays between retries will grow to. + * The default is 5s and the minimum is 0ms. */ + 'throttled_delete_retry.maximum_delay'?: Duration + /** The number of times to retry a throttled snapshot deletion. + * The default is 10 and the minimum value is 0 which will disable retries altogether. + * Note that if retries are enabled in the S3 client, each of these retries comprises that many client-level retries. */ + 'throttled_delete_retry.maximum_number_of_retries'?: integer +} + +export interface SnapshotShardsStats { + /** The number of shards that initialized, started, and finalized successfully. */ + done: long + /** The number of shards that failed to be included in the snapshot. */ + failed: long + /** The number of shards that are finalizing but are not done. */ + finalizing: long + /** The number of shards that are still initializing. */ + initializing: long + /** The number of shards that have started but are not finalized. */ + started: long + /** The total number of shards included in the snapshot. */ + total: long +} + +export type SnapshotShardsStatsStage = 'DONE' | 'FAILURE' | 'FINALIZE' | 'INIT' | 'STARTED' + +export interface SnapshotShardsStatsSummary { + incremental: SnapshotShardsStatsSummaryItem + total: SnapshotShardsStatsSummaryItem + start_time_in_millis: EpochTime<UnitMillis> + time?: Duration + time_in_millis: DurationValue<UnitMillis> +} + +export interface SnapshotShardsStatsSummaryItem { + file_count: long + size_in_bytes: long +} + +export interface SnapshotSharedFileSystemRepository extends SnapshotRepositoryBase { + /** The shared file system repository type. */ + type: 'fs' + /** The repository settings. */ + settings: SnapshotSharedFileSystemRepositorySettings +} + +export interface SnapshotSharedFileSystemRepositorySettings extends SnapshotRepositorySettingsBase { + /** The location of the shared filesystem used to store and retrieve snapshots. + * This location must be registered in the `path.repo` setting on all master and data nodes in the cluster. + * Unlike `path.repo`, this setting supports only a single file path. */ + location: string + /** The maximum number of snapshots the repository can contain. + * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ + max_number_of_snapshots?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository.
+ * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ + readonly?: boolean +} + +export interface SnapshotSnapshotIndexStats { + shards: Record<string, SnapshotSnapshotShardsStatus> + shards_stats: SnapshotShardsStats + stats: SnapshotSnapshotStats +} + +export interface SnapshotSnapshotInfo { + data_streams: string[] + duration?: Duration + duration_in_millis?: DurationValue<UnitMillis> + end_time?: DateTime + end_time_in_millis?: EpochTime<UnitMillis> + failures?: SnapshotSnapshotShardFailure[] + include_global_state?: boolean + indices?: IndexName[] + index_details?: Record<IndexName, SnapshotIndexDetails> + metadata?: Metadata + reason?: string + repository?: Name + snapshot: Name + shards?: ShardStatistics + start_time?: DateTime + start_time_in_millis?: EpochTime<UnitMillis> + state?: string + uuid: Uuid + version?: VersionString + version_id?: VersionNumber + feature_states?: SnapshotInfoFeatureState[] +} + +export interface SnapshotSnapshotShardFailure { + index: IndexName + node_id?: Id + reason: string + shard_id: integer + index_uuid: Id + status: string +} + +export interface SnapshotSnapshotShardsStatus { + stage: SnapshotShardsStatsStage + stats: SnapshotShardsStatsSummary +} + +export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count' + +export type SnapshotSnapshotState = 'IN_PROGRESS' | 'SUCCESS' | 'FAILED' | 'PARTIAL' | 'INCOMPATIBLE' + +export interface SnapshotSnapshotStats { + /** The number and size of files that still need to be copied as part of the incremental snapshot. + * For completed snapshots, this property indicates the number and size of files that were not already in the repository and were copied as part of the incremental snapshot. */ + incremental: SnapshotFileCountSnapshotStats + /** The time, in milliseconds, when the snapshot creation process started. */ + start_time_in_millis: EpochTime<UnitMillis> + time?: Duration + /** The total time, in milliseconds, that it took for the snapshot process to complete. */ + time_in_millis: DurationValue<UnitMillis> + /** The total number and size of files that are referenced by the snapshot. */ + total: SnapshotFileCountSnapshotStats +} + +export interface SnapshotSourceOnlyRepository extends SnapshotRepositoryBase { + /** The source-only repository type. */ + type: 'source' + /** The repository settings. */ + settings: SnapshotSourceOnlyRepositorySettings +} + +export interface SnapshotSourceOnlyRepositorySettings extends SnapshotRepositorySettingsBase { + /** The delegated repository type. For valid values, refer to the `type` parameter. + * Source repositories can use `settings` properties for their delegated repository type. */ + delegate_type?: string + /** The maximum number of snapshots the repository can contain. + * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ + max_number_of_snapshots?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository.
+ * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ + read_only?: boolean + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. + * @alias read_only */ + readonly?: boolean +} + +export interface SnapshotStatus { + /** Indicates whether the current cluster state is included in the snapshot. */ + include_global_state: boolean + indices: Record<string, SnapshotSnapshotIndexStats> + /** The name of the repository that includes the snapshot. */ + repository: string + /** Statistics for the shards in the snapshot. */ + shards_stats: SnapshotShardsStats + /** The name of the snapshot. */ + snapshot: string + /** The current snapshot state: + * + * * `FAILED`: The snapshot finished with an error and failed to store any data. + * * `STARTED`: The snapshot is currently running. + * * `SUCCESS`: The snapshot completed. */ + state: string + /** Details about the number (`file_count`) and size (`size_in_bytes`) of files included in the snapshot. */ + stats: SnapshotSnapshotStats + /** The universally unique identifier (UUID) for the snapshot. */ + uuid: Uuid +} + +export interface SnapshotCleanupRepositoryCleanupRepositoryResults { + /** The number of binary large objects (blobs) removed from the snapshot repository during cleanup operations. + * A non-zero value indicates that unreferenced blobs were found and subsequently cleaned up. */ + deleted_blobs: long + /** The number of bytes freed by cleanup operations. */ + deleted_bytes: long +} + +export interface SnapshotCleanupRepositoryRequest extends RequestBase { + /** The name of the snapshot repository to clean up. */ + name: Name + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } +} + +export interface SnapshotCleanupRepositoryResponse { + /** Statistics for cleanup operations.
*/ + results: SnapshotCleanupRepositoryCleanupRepositoryResults +} + +export interface SnapshotCloneRequest extends RequestBase { + /** The name of the snapshot repository that both source and target snapshot belong to. */ + repository: Name + /** The source snapshot name. */ + snapshot: Name + /** The target snapshot name. */ + target_snapshot: Name + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** A comma-separated list of indices to include in the snapshot. + * Multi-target syntax is supported. */ + indices: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, indices?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, indices?: never } +} + +export type SnapshotCloneResponse = AcknowledgedResponseBase + +export interface SnapshotCreateRequest extends RequestBase { + /** The name of the repository for the snapshot. */ + repository: Name + /** The name of the snapshot. + * It supports date math. + * It must be unique in the repository. */ + snapshot: Name + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** If `true`, the request returns a response when the snapshot is complete. + * If `false`, the request returns a response when the snapshot initializes. */ + wait_for_completion?: boolean + /** Determines how wildcard patterns in the `indices` parameter match data streams and indices. + * It supports comma-separated values such as `open,hidden`. */ + expand_wildcards?: ExpandWildcards + /** The feature states to include in the snapshot. + * Each feature state includes one or more system indices containing related data. + * You can view a list of eligible features using the get features API. + * + * If `include_global_state` is `true`, all current feature states are included by default. + * If `include_global_state` is `false`, no feature states are included by default. + * + * Note that specifying an empty array will result in the default behavior. + * To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). */ + feature_states?: string[] + /** If `true`, the request ignores data streams and indices in `indices` that are missing or closed. + * If `false`, the request returns an error for any data stream or index that is missing or closed. */ + ignore_unavailable?: boolean + /** If `true`, the current cluster state is included in the snapshot. + * The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. + * It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). */ + include_global_state?: boolean + /** A comma-separated list of data streams and indices to include in the snapshot. + * It supports a multi-target syntax.
+ * The default is an empty array (`[]`), which includes all regular data streams and regular indices. + * To exclude all data streams and indices, use `-*`. + * + * You can't use this parameter to include or exclude system indices or system data streams from a snapshot. + * Use `feature_states` instead. */ + indices?: Indices + /** Arbitrary metadata to attach to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. + * It can have any contents but it must be less than 1024 bytes. + * This information is not automatically generated by Elasticsearch. */ + metadata?: Metadata + /** If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. + * Only shards that were successfully included in the snapshot will be restored. + * All missing shards will be recreated as empty. + * + * If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. */ + partial?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, expand_wildcards?: never, feature_states?: never, ignore_unavailable?: never, include_global_state?: never, indices?: never, metadata?: never, partial?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, expand_wildcards?: never, feature_states?: never, ignore_unavailable?: never, include_global_state?: never, indices?: never, metadata?: never, partial?: never } +} + +export interface SnapshotCreateResponse { + /** Equals `true` if the snapshot was accepted. Present when the request had `wait_for_completion` set to `false`. */ + accepted?: boolean + /** Snapshot information. Present when the request had `wait_for_completion` set to `true`. */ + snapshot?: SnapshotSnapshotInfo +}
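+
+// A minimal usage sketch (not part of the generated types): taking a snapshot
+// and waiting for completion. It assumes `client` is an instantiated `Client`
+// from this package; repository and snapshot names are placeholders.
+//
+//   const resp: SnapshotCreateResponse = await client.snapshot.create({
+//     repository: 'my_repository',
+//     snapshot: 'snapshot-1',
+//     wait_for_completion: true,      // resolve when the snapshot finishes
+//     indices: 'logs-*',
+//     include_global_state: false
+//   })
+//   console.log(resp.snapshot?.state) // e.g. 'SUCCESS'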
+ +export interface SnapshotCreateRepositoryRequest extends RequestBase { + /** The name of the snapshot repository to register or update. */ + name: Name + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ + timeout?: Duration + /** If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. + * If `false`, this verification is skipped. + * You can also perform this verification with the verify snapshot repository API. */ + verify?: boolean + repository?: SnapshotRepository + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, verify?: never, repository?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never, verify?: never, repository?: never } +} + +export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase
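+
+// A minimal usage sketch (not part of the generated types): registering an S3
+// repository with the settings documented above. It assumes `client` is an
+// instantiated `Client` from this package; names are placeholders.
+//
+//   const resp: SnapshotCreateRepositoryResponse =
+//     await client.snapshot.createRepository({
+//       name: 'my_repository',
+//       repository: {
+//         type: 's3',
+//         settings: { bucket: 'my-bucket', base_path: 'snapshots' }
+//       },
+//       verify: true                  // check the repository on all nodes
+//     })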
+ +export interface SnapshotDeleteRequest extends RequestBase { + /** The name of the repository to delete a snapshot from. */ + repository: Name + /** A comma-separated list of snapshot names to delete. + * It also accepts wildcards (`*`). */ + snapshot: Names + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** If `true`, the request returns a response when the matching snapshots are all deleted. + * If `false`, the request returns a response as soon as the deletes are scheduled. */ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never } +} + +export type SnapshotDeleteResponse = AcknowledgedResponseBase + +export interface SnapshotDeleteRepositoryRequest extends RequestBase { + /** The name of the snapshot repositories to unregister. + * Wildcard (`*`) patterns are supported. */ + name: Names + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } +} + +export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase + +export interface SnapshotGetRequest extends RequestBase { + /** A comma-separated list of snapshot repository names used to limit the request. + * Wildcard (`*`) expressions are supported. */ + repository: Name + /** A comma-separated list of snapshot names to retrieve. + * Wildcards (`*`) are supported. + * + * * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. + * * To get information about any snapshots that are currently running, use `_current`. */ + snapshot: Names + /** An offset identifier to start pagination from as returned by the next field in the response body. */ + after?: string + /** The value of the current sort column at which to start retrieval. + * It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. + * It can be a millisecond time value or a number when sorting by `index-` or shard count. */ + from_sort_value?: string + /** If `false`, the request returns an error for any snapshots that are unavailable. */ + ignore_unavailable?: boolean + /** If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. + * The default is `false`, meaning that this information is omitted. */ + index_details?: boolean + /** If `true`, the response includes the name of each index in each snapshot. */ + index_names?: boolean + /** If `true`, the response includes the repository name in each snapshot. */ + include_repository?: boolean + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The sort order. + * Valid values are `asc` for ascending and `desc` for descending order. + * The default behavior is ascending order. */ + order?: SortOrder + /** Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. */ + offset?: integer + /** The maximum number of snapshots to return. + * The default is 0, which means to return all that match the request without limit. */ + size?: integer + /** Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to. + * + * You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. + * For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. + * Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. + * To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. */ + slm_policy_filter?: Name + /** The sort order for the result. + * The default behavior is sorting by snapshot start time stamp. */ + sort?: SnapshotSnapshotSort + /** Only return snapshots with a state found in the given comma-separated list of snapshot states. + * The default is all snapshot states. */ + state?: SnapshotSnapshotState | SnapshotSnapshotState[] + /** If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. + * + * NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. */ + verbose?: boolean + /** All values in `body` will be added to the request body.
*/ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, state?: never, verbose?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, state?: never, verbose?: never } +} + +export interface SnapshotGetResponse { + /** The number of remaining snapshots that were not returned due to size limits and that can be fetched by additional requests using the `next` field value. */ + remaining: integer + /** The total number of snapshots that match the request when ignoring the size limit or `after` query parameter. */ + total: integer + /** If the request contained a size limit and there might be more results, a `next` field will be added to the response. + * It can be used as the `after` query parameter to fetch additional results. */ + next?: string + responses?: SnapshotGetSnapshotResponseItem[] + snapshots?: SnapshotSnapshotInfo[] +} + +export interface SnapshotGetSnapshotResponseItem { + repository: Name + snapshots?: SnapshotSnapshotInfo[] + error?: ErrorCause +}
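+
+// A minimal usage sketch (not part of the generated types): paging through
+// snapshots with the `after`/`next` fields described above. It assumes
+// `client` is an instantiated `Client` from this package; the repository name
+// is a placeholder.
+//
+//   let after: string | undefined
+//   do {
+//     const page: SnapshotGetResponse = await client.snapshot.get({
+//       repository: 'my_repository',
+//       snapshot: '*',
+//       size: 100,
+//       after
+//     })
+//     for (const info of page.snapshots ?? []) console.log(info.snapshot)
+//     after = page.next                 // undefined when no more results
+//   } while (after !== undefined)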
*/ + size_bytes: long +} + +export interface SnapshotRepositoryAnalyzeDetailsInfo { + /** A description of the blob that was written and read. */ + blob: SnapshotRepositoryAnalyzeBlobDetails + /** The elapsed time spent overwriting the blob. + * If the blob was not overwritten, this information is omitted. */ + overwrite_elapsed?: Duration + /** The elapsed time spent overwriting the blob, in nanoseconds. + * If the blob was not overwritten, this information is omitted. */ + overwrite_elapsed_nanos?: DurationValue<UnitNanos> + /** The elapsed time spent writing the blob. */ + write_elapsed: Duration + /** The elapsed time spent writing the blob, in nanoseconds. */ + write_elapsed_nanos: DurationValue<UnitNanos> + /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob. */ + write_throttled: Duration + /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob, in nanoseconds. */ + write_throttled_nanos: DurationValue<UnitNanos> + /** The node which wrote the blob and coordinated the read operations. */ + writer_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo +} + +export interface SnapshotRepositoryAnalyzeReadBlobDetails { + /** Indicates whether the read operation may have started before the write operation was complete. */ + before_write_complete?: boolean + /** The length of time spent reading the blob. + * If the blob was not found, this detail is omitted. */ + elapsed?: Duration + /** The length of time spent reading the blob, in nanoseconds. + * If the blob was not found, this detail is omitted. */ + elapsed_nanos?: DurationValue<UnitNanos> + /** The length of time waiting for the first byte of the read operation to be received. + * If the blob was not found, this detail is omitted. */ + first_byte_time?: Duration + /** The length of time waiting for the first byte of the read operation to be received, in nanoseconds. + * If the blob was not found, this detail is omitted. */ + first_byte_time_nanos: DurationValue<UnitNanos> + /** Indicates whether the blob was found by the read operation. + * If the read was started before the write completed or the write was ended before completion, it might be false. */ + found: boolean + /** The node that performed the read operation. */ + node: SnapshotRepositoryAnalyzeSnapshotNodeInfo + /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob. + * If the blob was not found, this detail is omitted. */ + throttled?: Duration + /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob, in nanoseconds. + * If the blob was not found, this detail is omitted. */ + throttled_nanos?: DurationValue<UnitNanos> +} + +export interface SnapshotRepositoryAnalyzeReadSummaryInfo { + /** The number of read operations performed in the test. */ + count: integer + /** The maximum time spent waiting for the first byte of any read request to be received. */ + max_wait: Duration + /** The maximum time spent waiting for the first byte of any read request to be received, in nanoseconds. */ + max_wait_nanos: DurationValue<UnitNanos> + /** The total elapsed time spent on reading blobs in the test.
*/ + total_elapsed: Duration + /** The total elapsed time spent on reading blobs in the test, in nanoseconds. */ + total_elapsed_nanos: DurationValue<UnitNanos> + /** The total size of all the blobs or partial blobs read in the test. */ + total_size: ByteSize + /** The total size of all the blobs or partial blobs read in the test, in bytes. */ + total_size_bytes: long + /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles. */ + total_throttled: Duration + /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles, in nanoseconds. */ + total_throttled_nanos: DurationValue<UnitNanos> + /** The total time spent waiting for the first byte of each read request to be received. */ + total_wait: Duration + /** The total time spent waiting for the first byte of each read request to be received, in nanoseconds. */ + total_wait_nanos: DurationValue<UnitNanos> +} + +export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { + /** The name of the repository. */ + name: Name + /** The total number of blobs to write to the repository during the test. + * For realistic experiments, set this parameter to at least `2000`. */ + blob_count?: integer + /** The number of operations to run concurrently during the test. + * For realistic experiments, leave this parameter unset. */ + concurrency?: integer + /** Indicates whether to return detailed results, including timing information for every operation performed during the analysis. + * If `false`, it returns only a summary of the analysis. */ + detailed?: boolean + /** The number of nodes on which to perform an early read operation while writing each blob. + * Early read operations are only rarely performed. + * For realistic experiments, leave this parameter unset. */ + early_read_node_count?: integer + /** The maximum size of a blob to be written during the test. + * For realistic experiments, set this parameter to at least `2gb`. */ + max_blob_size?: ByteSize + /** An upper limit on the total size of all the blobs written during the test. + * For realistic experiments, set this parameter to at least `1tb`. */ + max_total_data_size?: ByteSize + /** The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. + * For realistic experiments, leave this parameter unset. */ + rare_action_probability?: double + /** Indicates whether to rarely cancel writes before they complete. + * For realistic experiments, leave this parameter unset. */ + rarely_abort_writes?: boolean + /** The number of nodes on which to read a blob after writing. + * For realistic experiments, leave this parameter unset. */ + read_node_count?: integer + /** The minimum number of linearizable register operations to perform in total. + * For realistic experiments, set this parameter to at least `100`. */ + register_operation_count?: integer + /** The seed for the pseudo-random number generator used to generate the list of operations performed during the test. + * To repeat the same set of operations in multiple experiments, use the same seed in each experiment. + * Note that the operations are performed concurrently so might not always happen in the same order on each run. + * For realistic experiments, leave this parameter unset. */ + seed?: integer + /** The period of time to wait for the test to complete. + * If no response is received before the timeout expires, the test is cancelled and returns an error.
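The request parameters above map directly onto `client.snapshot.repositoryAnalyze`. A minimal sketch of a deliberately small smoke-test run (endpoint and repository name are placeholders; realistic runs should use the larger values the docs recommend):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder endpoint

// Quick, tiny analysis run; not representative of a production repository test.
async function quickRepositoryAnalysis (name: string) {
  const resp = await client.snapshot.repositoryAnalyze({
    name,
    blob_count: 10,        // smoke test only; docs recommend >= 2000 for realism
    max_blob_size: '1mb',  // docs recommend >= 2gb for realism
    timeout: '120s'
  })
  if (resp.issues_detected.length > 0) {
    throw new Error(`repository issues: ${resp.issues_detected.join('; ')}`)
  }
  return resp.summary // read/write statistics for the run
}
```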
+ * For realistic experiments, set this parameter sufficiently long to allow the test to complete. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, blob_count?: never, concurrency?: never, detailed?: never, early_read_node_count?: never, max_blob_size?: never, max_total_data_size?: never, rare_action_probability?: never, rarely_abort_writes?: never, read_node_count?: never, register_operation_count?: never, seed?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, blob_count?: never, concurrency?: never, detailed?: never, early_read_node_count?: never, max_blob_size?: never, max_total_data_size?: never, rare_action_probability?: never, rarely_abort_writes?: never, read_node_count?: never, register_operation_count?: never, seed?: never, timeout?: never } +} + +export interface SnapshotRepositoryAnalyzeResponse { + /** The number of blobs written to the repository during the test. */ + blob_count: integer + /** The path in the repository under which all the blobs were written during the test. */ + blob_path: string + /** The number of write operations performed concurrently during the test. */ + concurrency: integer + /** The node that coordinated the analysis and performed the final cleanup. */ + coordinating_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo + /** The time it took to delete all the blobs in the container. */ + delete_elapsed: Duration + /** The time it took to delete all the blobs in the container, in nanoseconds. */ + delete_elapsed_nanos: DurationValue<UnitNanos> + /** A description of every read and write operation performed during the test. */ + details: SnapshotRepositoryAnalyzeDetailsInfo[] + /** The limit on the number of nodes on which early read operations were performed after writing each blob. */ + early_read_node_count: integer + /** A list of correctness issues detected, which is empty if the API succeeded. + * It is included to emphasize that a successful response does not guarantee correct behaviour in future. */ + issues_detected: string[] + /** The time it took to retrieve a list of all the blobs in the container. */ + listing_elapsed: Duration + /** The time it took to retrieve a list of all the blobs in the container, in nanoseconds. */ + listing_elapsed_nanos: DurationValue<UnitNanos> + /** The limit on the size of a blob written during the test. */ + max_blob_size: ByteSize + /** The limit, in bytes, on the size of a blob written during the test. */ + max_blob_size_bytes: long + /** The limit on the total size of all blobs written during the test. */ + max_total_data_size: ByteSize + /** The limit, in bytes, on the total size of all blobs written during the test. */ + max_total_data_size_bytes: long + /** The probability of performing rare actions during the test. */ + rare_action_probability: double + /** The limit on the number of nodes on which read operations were performed after writing each blob. */ + read_node_count: integer + /** The name of the repository that was the subject of the analysis. */ + repository: string + /** The seed for the pseudo-random number generator used to generate the operations used during the test. */ + seed: long + /** A collection of statistics that summarize the results of the test.
*/ + summary: SnapshotRepositoryAnalyzeSummaryInfo +} + +export interface SnapshotRepositoryAnalyzeSnapshotNodeInfo { + id: Id + name: Name +} + +export interface SnapshotRepositoryAnalyzeSummaryInfo { + /** A collection of statistics that summarize the results of the read operations in the test. */ + read: SnapshotRepositoryAnalyzeReadSummaryInfo + /** A collection of statistics that summarize the results of the write operations in the test. */ + write: SnapshotRepositoryAnalyzeWriteSummaryInfo +} + +export interface SnapshotRepositoryAnalyzeWriteSummaryInfo { + /** The number of write operations performed in the test. */ + count: integer + /** The total elapsed time spent on writing blobs in the test. */ + total_elapsed: Duration + /** The total elapsed time spent on writing blobs in the test, in nanoseconds. */ + total_elapsed_nanos: DurationValue<UnitNanos> + /** The total size of all the blobs written in the test. */ + total_size: ByteSize + /** The total size of all the blobs written in the test, in bytes. */ + total_size_bytes: long + /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle. */ + total_throttled: Duration + /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle, in nanoseconds. */ + total_throttled_nanos: long +} + +export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { + /** The name of the snapshot repository. */ + name: Names + /** If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once. */ + blob_thread_pool_concurrency?: integer + /** The maximum number of index snapshots to verify concurrently within each index verification. */ + index_snapshot_verification_concurrency?: integer + /** The number of indices to verify concurrently. + * The default behavior is to use the entire `snapshot_meta` thread pool. */ + index_verification_concurrency?: integer + /** If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second. */ + max_bytes_per_sec?: string + /** The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. + * If your repository contains more than this number of shard snapshot failures, the verification will fail. */ + max_failed_shard_snapshots?: integer + /** The maximum number of snapshot metadata operations to run concurrently. + * The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ + meta_thread_pool_concurrency?: integer + /** The number of snapshots to verify concurrently. + * The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ + snapshot_verification_concurrency?: integer + /** Indicates whether to verify the checksum of every data blob in the repository. + * If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. */ + verify_blob_contents?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, blob_thread_pool_concurrency?: never, index_snapshot_verification_concurrency?: never, index_verification_concurrency?: never, max_bytes_per_sec?: never, max_failed_shard_snapshots?: never, meta_thread_pool_concurrency?: never, snapshot_verification_concurrency?: never, verify_blob_contents?: never } + /** All values in `querystring` will be added to the request querystring.
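A hedged sketch of driving the integrity-verification request above; the method name follows the client's usual camel-casing of the endpoint (`snapshot.repositoryVerifyIntegrity`), and the repository name is a placeholder:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder endpoint

// Metadata-only verification; setting `verify_blob_contents: true` would also
// checksum every blob, which the docs warn can be extremely slow and expensive.
async function verifyRepositoryIntegrity (name: string) {
  return await client.snapshot.repositoryVerifyIntegrity({
    name,
    max_failed_shard_snapshots: 100 // stop early if failures pile up
  })
}
```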
*/ + querystring?: { [key: string]: any } & { name?: never, blob_thread_pool_concurrency?: never, index_snapshot_verification_concurrency?: never, index_verification_concurrency?: never, max_bytes_per_sec?: never, max_failed_shard_snapshots?: never, meta_thread_pool_concurrency?: never, snapshot_verification_concurrency?: never, verify_blob_contents?: never } +} + +export type SnapshotRepositoryVerifyIntegrityResponse = any + +export interface SnapshotRestoreRequest extends RequestBase { + /** The name of the repository to restore a snapshot from. */ + repository: Name + /** The name of the snapshot to restore. */ + snapshot: Name + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** If `true`, the request returns a response when the restore operation completes. + * The operation is complete when it finishes all attempts to recover primary shards for restored indices. + * This applies even if one or more of the recovery attempts fail. + * + * If `false`, the request returns a response when the restore operation initializes. */ + wait_for_completion?: boolean + /** The feature states to restore. + * If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. + * If `include_global_state` is `false`, the request restores no feature states by default. + * Note that specifying an empty array will result in the default behavior. + * To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). */ + feature_states?: string[] + /** The index settings to not restore from the snapshot. + * You can't use this option to ignore `index.number_of_shards`. + * + * For data streams, this option applies only to restored backing indices. + * New backing indices are configured using the data stream's matching index template. */ + ignore_index_settings?: string[] + /** If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. + * If `false`, the request returns an error for any missing index or data stream. */ + ignore_unavailable?: boolean + /** If `true`, the request restores aliases for any restored data streams and indices. + * If `false`, the request doesn’t restore aliases. */ + include_aliases?: boolean + /** If `true`, restore the cluster state. The cluster state includes: + * + * * Persistent cluster settings + * * Index templates + * * Legacy index templates + * * Ingest pipelines + * * Index lifecycle management (ILM) policies + * * Stored scripts + * * For snapshots taken after 7.12.0, feature states + * + * If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. + * It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. + * + * Use the `feature_states` parameter to configure how feature states are restored. + * + * If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. 
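The restore request above is most often used with the rename parameters so a restore cannot collide with live indices. A minimal sketch (repository, snapshot name, and the `restored_` prefix are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder endpoint

// Restore every index from a snapshot under a `restored_` prefix.
async function restoreRenamed (repository: string, snapshot: string) {
  const resp = await client.snapshot.restore({
    repository,
    snapshot,
    indices: '*',
    rename_pattern: '(.+)',            // capture the original index name
    rename_replacement: 'restored_$1', // re-emit it with a prefix
    include_global_state: false,
    wait_for_completion: true
  })
  return resp.snapshot?.shards // ShardStatistics for the completed restore
}
```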
*/ + include_global_state?: boolean + /** Index settings to add or change in restored indices, including backing indices. + * You can't use this option to change `index.number_of_shards`. + * + * For data streams, this option applies only to restored backing indices. + * New backing indices are configured using the data stream's matching index template. */ + index_settings?: IndicesIndexSettings + /** A comma-separated list of indices and data streams to restore. + * It supports a multi-target syntax. + * The default behavior is all regular indices and regular data streams in the snapshot. + * + * You can't use this parameter to restore system indices or system data streams. + * Use `feature_states` instead. */ + indices?: Indices + /** If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. + * + * If `true`, it allows restoring a partial snapshot of indices with unavailable shards. + * Only shards that were successfully included in the snapshot will be restored. + * All missing shards will be recreated as empty. */ + partial?: boolean + /** A rename pattern to apply to restored data streams and indices. + * Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. + * + * The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. */ + rename_pattern?: string + /** The rename replacement string that is used with the `rename_pattern`. */ + rename_replacement?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, feature_states?: never, ignore_index_settings?: never, ignore_unavailable?: never, include_aliases?: never, include_global_state?: never, index_settings?: never, indices?: never, partial?: never, rename_pattern?: never, rename_replacement?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, feature_states?: never, ignore_index_settings?: never, ignore_unavailable?: never, include_aliases?: never, include_global_state?: never, index_settings?: never, indices?: never, partial?: never, rename_pattern?: never, rename_replacement?: never } +} + +export interface SnapshotRestoreResponse { + accepted?: boolean + snapshot?: SnapshotRestoreSnapshotRestore +} + +export interface SnapshotRestoreSnapshotRestore { + indices: IndexName[] + snapshot: string + shards: ShardStatistics +} + +export interface SnapshotStatusRequest extends RequestBase { + /** The snapshot repository name used to limit the request. + * It supports wildcards (`*`) if `<snapshot>` isn't specified. */ + repository?: Name + /** A comma-separated list of snapshots to retrieve status for. + * The default is currently running snapshots. + * Wildcards (`*`) are not supported. */ + snapshot?: Names + /** If `false`, the request returns an error for any snapshots that are unavailable. + * If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. */ + ignore_unavailable?: boolean + /** The period to wait for the master node.
+ * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never } +} + +export interface SnapshotStatusResponse { + snapshots: SnapshotStatus[] +} + +export interface SnapshotVerifyRepositoryCompactNodeInfo { + /** A human-readable name for the node. + * You can set this name using the `node.name` property in `elasticsearch.yml`. + * The default value is the machine's hostname. */ + name: Name +} + +export interface SnapshotVerifyRepositoryRequest extends RequestBase { + /** The name of the snapshot repository to verify. */ + name: Name + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } +} + +export interface SnapshotVerifyRepositoryResponse { + /** Information about the nodes connected to the snapshot repository. + * The key is the ID of the node. */ + nodes: Record<string, SnapshotVerifyRepositoryCompactNodeInfo> +} + +export interface SqlColumn { + name: Name + type: string +} + +export type SqlRow = any[] + +export interface SqlClearCursorRequest extends RequestBase { + /** Cursor to clear. */ + cursor: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { cursor?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { cursor?: never } +} + +export interface SqlClearCursorResponse { + succeeded: boolean +} + +export interface SqlDeleteAsyncRequest extends RequestBase { + /** The identifier for the search. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export type SqlDeleteAsyncResponse = AcknowledgedResponseBase + +export interface SqlGetAsyncRequest extends RequestBase { + /** The identifier for the search. */ + id: Id + /** The separator for CSV results. + * The API supports this parameter only for CSV responses. */ + delimiter?: string + /** The format for the response.
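The verify-repository response above is keyed by node ID, which makes a quick connectivity check straightforward. A minimal sketch (endpoint and repository name are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder endpoint

// Return the IDs of the nodes that can reach the repository.
async function verifyRepo (name: string): Promise<string[]> {
  const resp = await client.snapshot.verifyRepository({ name, timeout: '30s' })
  return Object.keys(resp.nodes)
}
```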
+ * You must specify a format using this parameter or the `Accept` HTTP header. + * If you specify both, the API uses this parameter. */ + format?: string + /** The retention period for the search and its results. + * It defaults to the `keep_alive` period for the original SQL search. */ + keep_alive?: Duration + /** The period to wait for complete results. + * It defaults to no timeout, meaning the request waits for complete search results. */ + wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, delimiter?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, delimiter?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } +} + +export interface SqlGetAsyncResponse { + /** Identifier for the search. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. */ + id: Id + /** If `true`, the search is still running. + * If `false`, the search has finished. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ + is_running: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ + is_partial: boolean + /** Column headings for the search results. Each object is a column. */ + columns?: SqlColumn[] + /** The cursor for the next set of paginated results. + * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ + cursor?: string + /** The values for the search results. */ + rows: SqlRow[] +} + +export interface SqlGetAsyncStatusRequest extends RequestBase { + /** The identifier for the search. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface SqlGetAsyncStatusResponse { + /** The timestamp, in milliseconds since the Unix epoch, when Elasticsearch will delete the search and its results, even if the search is still running. */ + expiration_time_in_millis: EpochTime<UnitMillis> + /** The identifier for the search. */ + id: string + /** If `true`, the search is still running. + * If `false`, the search has finished. */ + is_running: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. */ + is_partial: boolean + /** The timestamp, in milliseconds since the Unix epoch, when the search started. + * The API returns this property only for running searches.
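Together, the async-SQL types above describe a submit/poll/fetch lifecycle. A minimal sketch of that flow (endpoint and polling interval are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder endpoint

// Start an async SQL search, poll its status, then fetch the rows.
async function runAsyncSql (sql: string) {
  const started = await client.sql.query({
    query: sql,
    wait_for_completion_timeout: '2s', // becomes async if slower than this
    keep_on_completion: true           // keep results even if it finishes in time
  })
  if (started.id == null) return started.rows // completed synchronously

  let status = await client.sql.getAsyncStatus({ id: started.id })
  while (status.is_running) {
    await new Promise(resolve => setTimeout(resolve, 1000))
    status = await client.sql.getAsyncStatus({ id: started.id })
  }
  const done = await client.sql.getAsync({ id: started.id })
  return done.rows
}
```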
*/ + start_time_in_millis: EpochTime<UnitMillis> + /** The HTTP status code for the search. + * The API returns this property only for completed searches. */ + completion_status?: uint +} + +export interface SqlQueryRequest extends RequestBase { + /** The format for the response. + * You can also specify a format using the `Accept` HTTP header. + * If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. */ + format?: SqlQuerySqlFormat + /** Specifies a subset of projects to target for the search using project + * metadata tags in a subset of Lucene query syntax. + * Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). + * Examples: + * _alias:my-project + * _alias:_origin + * _alias:*pr* + * Supported in serverless only. */ + project_routing?: ProjectRouting + /** If `true`, the response has partial results when there are shard request timeouts or shard failures. + * If `false`, the API returns an error with no partial results. */ + allow_partial_search_results?: boolean + /** The default catalog (cluster) for queries. + * If unspecified, the queries execute on the data in the local cluster only. */ + catalog?: string + /** If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. + * The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. */ + columnar?: boolean + /** The cursor used to retrieve a set of paginated results. + * If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. + * It ignores other request body parameters. */ + cursor?: string + /** The maximum number of rows (or entries) to return in one response. */ + fetch_size?: integer + /** If `false`, the API returns an exception when encountering multiple values for a field. + * If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. */ + field_multi_value_leniency?: boolean + /** The Elasticsearch query DSL for additional filtering. */ + filter?: QueryDslQueryContainer + /** If `true`, the search can run on frozen indices. */ + index_using_frozen?: boolean + /** The retention period for an async or saved synchronous search. */ + keep_alive?: Duration + /** If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. + * If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. */ + keep_on_completion?: boolean + /** The minimum retention period for the scroll cursor. + * After this time period, a pagination request might fail because the scroll cursor is no longer available. + * Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. */ + page_timeout?: Duration + /** The values for parameters in the query. */ + params?: any[] + /** The SQL query to run. */ + query?: string + /** The timeout before the request fails. */ + request_timeout?: Duration + /** One or more runtime fields for the search request. + * These fields take precedence over mapped fields with the same name. */ + runtime_mappings?: MappingRuntimeFields + /** The ISO-8601 time zone ID for the search. */ + time_zone?: TimeZone + /** The period to wait for complete results. + * It defaults to no timeout, meaning the request waits for complete search results.
+ * If the search doesn't finish within this period, the search becomes async. + * + * To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. */ + wait_for_completion_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { format?: never, project_routing?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { format?: never, project_routing?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } +} + +export interface SqlQueryResponse { + /** Column headings for the search results. Each object is a column. */ + columns?: SqlColumn[] + /** The cursor for the next set of paginated results. + * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ + cursor?: string + /** The identifier for the search. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. */ + id?: Id + /** If `true`, the search is still running. + * If `false`, the search has finished. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ + is_running?: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ + is_partial?: boolean + /** The values for the search results. */ + rows: SqlRow[] +} + +export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' + +export interface SqlTranslateRequest extends RequestBase { + /** The maximum number of rows (or entries) to return in one response. */ + fetch_size?: integer + /** The Elasticsearch query DSL for additional filtering. */ + filter?: QueryDslQueryContainer + /** The SQL query to run. */ + query: string + /** The ISO-8601 time zone ID for the search. */ + time_zone?: TimeZone + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { fetch_size?: never, filter?: never, query?: never, time_zone?: never } + /** All values in `querystring` will be added to the request querystring. 
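The translate request above converts SQL into the equivalent Query DSL without executing it, which is useful for debugging or for migrating an SQL statement into a regular search. A minimal sketch (the index name is a sample-data placeholder):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder endpoint

// Translate an SQL statement into the equivalent search request body.
async function sqlToQueryDsl () {
  const resp = await client.sql.translate({
    query: 'SELECT agent, bytes FROM "kibana_sample_data_logs" ORDER BY bytes DESC', // placeholder index
    fetch_size: 10
  })
  return resp.query // QueryDslQueryContainer, alongside size/sort/_source siblings
}
```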
*/ + querystring?: { [key: string]: any } & { fetch_size?: never, filter?: never, query?: never, time_zone?: never } +} + +export interface SqlTranslateResponse { + aggregations?: Record<string, AggregationsAggregationContainer> + size?: long + _source?: SearchSourceConfig + fields?: (QueryDslFieldAndFormat | Field)[] + query?: QueryDslQueryContainer + sort?: Sort + track_total_hits?: SearchTrackHits +} + +export interface SslCertificatesCertificateInformation { + /** If the path refers to a container file (a jks keystore, or a PKCS#12 file), it is the alias of the certificate. + * Otherwise, it is null. */ + alias: string | null + /** The ISO formatted date of the certificate's expiry (not-after) date. */ + expiry: DateTime + /** The format of the file. + * Valid values include `jks`, `PKCS12`, and `PEM`. */ + format: string + /** Indicates whether Elasticsearch has access to the private key for this certificate. */ + has_private_key: boolean + /** The Distinguished Name of the certificate's issuer. */ + issuer?: string + /** The path to the certificate, as configured in the `elasticsearch.yml` file. */ + path: string + /** The hexadecimal representation of the certificate's serial number. */ + serial_number: string + /** The Distinguished Name of the certificate's subject. */ + subject_dn: string +} + +export interface SslCertificatesRequest extends RequestBase { + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } +} + +export type SslCertificatesResponse = SslCertificatesCertificateInformation[] + +export interface StreamsLogsDisableRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export type StreamsLogsDisableResponse = AcknowledgedResponseBase + +export interface StreamsLogsEnableRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never } +} + +export type StreamsLogsEnableResponse = AcknowledgedResponseBase + +export interface StreamsStatusLogsStatus { + /** If true, the logs stream feature is enabled. */ + enabled: boolean +} + +export interface StreamsStatusRequest extends RequestBase { + /** The period to wait for a connection to the master node.
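The certificates response above is a flat array, so expiry monitoring reduces to a filter. A minimal sketch (the 30-day cutoff is an arbitrary example):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder endpoint

// Return certificates that expire within the next 30 days.
async function findExpiringCertificates () {
  const certs = await client.ssl.certificates()
  const cutoff = Date.now() + 30 * 24 * 60 * 60 * 1000
  return certs.filter(c => {
    // `expiry` is a DateTime; coerce either an ISO string or epoch millis.
    const expiry = typeof c.expiry === 'number' ? c.expiry : Date.parse(String(c.expiry))
    return expiry < cutoff
  })
}
```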
If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface StreamsStatusResponse { + logs: StreamsStatusLogsStatus +} + +export interface SynonymsSynonymRule { + /** The identifier for the synonym rule. + * If you do not specify a synonym rule ID when you create a rule, an identifier is created automatically by Elasticsearch. */ + id?: Id + /** The synonyms that make up the synonym rule, in Solr format. */ + synonyms: SynonymsSynonymString +} + +export interface SynonymsSynonymRuleRead { + /** The synonym rule identifier. */ + id: Id + /** Synonyms, in Solr format, that make up the synonym rule. */ + synonyms: SynonymsSynonymString +} + +export type SynonymsSynonymString = string + +export interface SynonymsSynonymsUpdateResult { + /** The update operation result. */ + result: Result + /** Updating synonyms in a synonym set can reload the associated analyzers in case `refresh` is set to `true`. + * This information is the analyzers reloading result. */ + reload_analyzers_details?: IndicesReloadSearchAnalyzersReloadResult +} + +export interface SynonymsDeleteSynonymRequest extends RequestBase { + /** The synonyms set identifier to delete. */ + id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase + +export interface SynonymsDeleteSynonymRuleRequest extends RequestBase { + /** The ID of the synonym set to update. */ + set_id: Id + /** The ID of the synonym rule to delete. */ + rule_id: Id + /** If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning. + * If `false`, analyzers will not be reloaded with the deleted synonym rule. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + refresh?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never, refresh?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never, refresh?: never } +} + +export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult + +export interface SynonymsGetSynonymRequest extends RequestBase { + /** The synonyms set identifier to retrieve. */ + id: Id + /** The starting offset for synonym rules to retrieve. */ + from?: integer + /** The maximum number of synonym rules to retrieve. */ + size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, from?: never, size?: never } +} + +export interface SynonymsGetSynonymResponse { + /** The total number of synonym rules that the synonyms set contains.
*/ + count: integer + /** Synonym rule details. */ + synonyms_set: SynonymsSynonymRuleRead[] +} + +export interface SynonymsGetSynonymRuleRequest extends RequestBase { + /** The ID of the synonym set to retrieve the synonym rule from. */ + set_id: Id + /** The ID of the synonym rule to retrieve. */ + rule_id: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never } +} + +export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead + +export interface SynonymsGetSynonymsSetsRequest extends RequestBase { + /** The starting offset for synonyms sets to retrieve. */ + from?: integer + /** The maximum number of synonyms sets to retrieve. */ + size?: integer + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never } +} + +export interface SynonymsGetSynonymsSetsResponse { + /** The total number of synonyms sets defined. */ + count: integer + /** The identifier and total number of defined synonym rules for each synonyms set. */ + results: SynonymsGetSynonymsSetsSynonymsSetItem[] +} + +export interface SynonymsGetSynonymsSetsSynonymsSetItem { + /** The synonyms set identifier. */ + synonyms_set: Id + /** The number of synonym rules that the synonym set contains. */ + count: integer +} + +export interface SynonymsPutSynonymRequest extends RequestBase { + /** The ID of the synonyms set to be created or updated. */ + id: Id + /** If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning. + * If `false`, analyzers will not be reloaded with the new synonym set. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + refresh?: boolean + /** The synonym rule definitions for the synonyms set. */ + synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, refresh?: never, synonyms_set?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, refresh?: never, synonyms_set?: never } +} + +export interface SynonymsPutSynonymResponse { + /** The update operation result. */ + result: Result + /** Updating a synonyms set can reload the associated analyzers in case `refresh` is set to `true`. + * This information is the analyzers reloading result. */ + reload_analyzers_details?: IndicesReloadSearchAnalyzersReloadResult +} + +export interface SynonymsPutSynonymRuleRequest extends RequestBase { + /** The ID of the synonym set. */ + set_id: Id + /** The ID of the synonym rule to be updated or created. */ + rule_id: Id + /** If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning. + * If `false`, analyzers will not be reloaded with the new synonym rule. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + refresh?: boolean + /** The synonym rule information definition, which must be in Solr format.
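The put/get pair above covers the usual synonyms-set round trip. A minimal sketch (set ID and rule contents are placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder endpoint

// Create (or overwrite) a synonyms set, then read it back a page at a time.
async function putAndListSynonyms () {
  await client.synonyms.putSynonym({
    id: 'my-synonyms-set', // placeholder set ID
    synonyms_set: [
      { id: 'greetings', synonyms: 'hello, hi, howdy' },
      { synonyms: 'laptop => notebook' } // rule ID is auto-generated when omitted
    ]
  })
  const page = await client.synonyms.getSynonym({ id: 'my-synonyms-set', from: 0, size: 10 })
  return page.synonyms_set.map(rule => `${rule.id}: ${rule.synonyms}`)
}
```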
*/ + synonyms: SynonymsSynonymString + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { set_id?: never, rule_id?: never, refresh?: never, synonyms?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { set_id?: never, rule_id?: never, refresh?: never, synonyms?: never } +} + +export type SynonymsPutSynonymRuleResponse = SynonymsSynonymsUpdateResult + +export type TasksGroupBy = 'nodes' | 'parents' | 'none' + +export interface TasksNodeTasks { + name?: NodeId + transport_address?: TransportAddress + host?: Host + ip?: Ip + roles?: string[] + attributes?: Record<string, string> + tasks: Record<TaskId, TasksTaskInfo> +} + +export interface TasksParentTaskInfo extends TasksTaskInfo { + children?: TasksTaskInfo[] +} + +export interface TasksTaskInfo { + action: string + cancelled?: boolean + cancellable: boolean + /** Human readable text that identifies the particular request that the task is performing. + * For example, it might identify the search request being performed by a search task. + * Other kinds of tasks have different descriptions, like `_reindex` which has the source and the destination, or `_bulk` which just has the number of requests and the destination indices. + * Many requests will have only an empty description because more detailed information about the request is not easily available or particularly helpful in identifying the request. */ + description?: string + headers: Record<string, string> + id: long + node: NodeId + running_time?: Duration + running_time_in_nanos: DurationValue<UnitNanos> + start_time_in_millis: EpochTime<UnitMillis> + /** The internal status of the task, which varies from task to task. + * The format also varies. + * While the goal is to keep the status for a particular task consistent from version to version, this is not always possible because sometimes the implementation changes. + * Fields might be removed from the status for a particular request so any parsing you do of the status might break in minor releases. */ + status?: any + type: string + parent_task_id?: TaskId +} + +export type TasksTaskInfos = TasksTaskInfo[] | Record<string, TasksParentTaskInfo> + +export interface TasksTaskListResponseBase { + node_failures?: ErrorCause[] + task_failures?: TaskFailure[] + /** Task information grouped by node, if `group_by` was set to `nodes` (the default). */ + nodes?: Record<string, TasksNodeTasks> + /** Either a flat list of tasks if `group_by` was set to `none`, or grouped by parents if + * `group_by` was set to `parents`. */ + tasks?: TasksTaskInfos +} + +export interface TasksCancelRequest extends RequestBase { + /** The task identifier. */ + task_id?: TaskId + /** A comma-separated list or wildcard expression of actions that is used to limit the request. */ + actions?: string | string[] + /** A comma-separated list of node IDs or names that is used to limit the request. */ + nodes?: string[] + /** A parent task ID that is used to limit the tasks. */ + parent_task_id?: string + /** If `true`, the request blocks until all found tasks are complete. */ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, actions?: never, nodes?: never, parent_task_id?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring.
*/ + querystring?: { [key: string]: any } & { task_id?: never, actions?: never, nodes?: never, parent_task_id?: never, wait_for_completion?: never } +} + +export type TasksCancelResponse = TasksTaskListResponseBase + +export interface TasksGetRequest extends RequestBase { + /** The task identifier. */ + task_id: Id + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** If `true`, the request blocks until the task has completed. */ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_id?: never, timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_id?: never, timeout?: never, wait_for_completion?: never } +} + +export interface TasksGetResponse { + completed: boolean + task: TasksTaskInfo + response?: any + error?: ErrorCause +} + +export interface TasksListRequest extends RequestBase { + /** A comma-separated list or wildcard expression of actions used to limit the request. + * For example, you can use `cluster:*` to retrieve all cluster-related tasks. */ + actions?: string | string[] + /** If `true`, the response includes detailed information about the running tasks. + * This information is useful to distinguish tasks from each other but is more costly to run. */ + detailed?: boolean + /** A key that is used to group tasks in the response. + * The task lists can be grouped either by nodes or by parent tasks. */ + group_by?: TasksGroupBy + /** A comma-separated list of node IDs or names that is used to limit the returned information. */ + nodes?: NodeIds + /** A parent task identifier that is used to limit returned information. + * To return all tasks, omit this parameter or use a value of `-1`. + * If the parent task is not found, the API does not return a 404 response code. */ + parent_task_id?: Id + /** The period to wait for each node to respond. + * If a node does not respond before its timeout expires, the response does not include its information. + * However, timed out nodes are included in the `node_failures` property. */ + timeout?: Duration + /** If `true`, the request blocks until the operation is complete. */ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { actions?: never, detailed?: never, group_by?: never, nodes?: never, parent_task_id?: never, timeout?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring.
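Because `tasks` in the list response is either a flat array or a record keyed by `node:id` (depending on `group_by`), consumers have to branch on the shape. A minimal sketch that lists and then cancels reindex tasks (the action filter is an example):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200' }) // placeholder endpoint

// List running reindex tasks grouped by parent task, then cancel each by ID.
async function cancelReindexTasks (): Promise<string[]> {
  const listed = await client.tasks.list({
    actions: '*reindex', // example action filter
    detailed: true,
    group_by: 'parents'
  })
  const tasks = listed.tasks ?? {}
  // Flat array when group_by is 'none'; record keyed by "node:id" otherwise.
  const ids = Array.isArray(tasks)
    ? tasks.map(t => `${t.node}:${t.id}`)
    : Object.keys(tasks)
  for (const taskId of ids) {
    await client.tasks.cancel({ task_id: taskId })
  }
  return ids
}
```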
*/ + querystring?: { [key: string]: any } & { actions?: never, detailed?: never, group_by?: never, nodes?: never, parent_task_id?: never, timeout?: never, wait_for_completion?: never } +} + +export type TasksListResponse = TasksTaskListResponseBase + +export type TextStructureEcsCompatibilityType = 'disabled' | 'v1' + +export interface TextStructureFieldStat { + count: integer + cardinality: integer + top_hits: TextStructureTopHit[] + mean_value?: integer + median_value?: integer + max_value?: integer + min_value?: integer + earliest?: string + latest?: string +} + +export type TextStructureFormatType = 'delimited' | 'ndjson' | 'semi_structured_text' | 'xml' + +export interface TextStructureTopHit { + count: long + value: any +} + +export interface TextStructureFindFieldStructureRequest extends RequestBase { + /** If `format` is set to `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ + column_names?: string | string[] + /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ + delimiter?: string + /** The number of documents to include in the structural analysis. + * The minimum value is 2. */ + documents_to_sample?: uint + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. + * The intention in that situation is that a user who knows the meanings will rename the fields before using them. */ + ecs_compatibility?: TextStructureEcsCompatibilityType + /** If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */ + explain?: boolean + /** The field that should be analyzed. */ + field: Field + /** The high level structure of the text. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ + format?: TextStructureFormatType + /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. 
+ * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ + grok_pattern?: GrokPattern + /** The name of the index that contains the analyzed field. */ + index: IndexName + /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ + quote?: string + /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. + * Otherwise, the default value is `false`. */ + should_trim_fields?: boolean + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires, it will be stopped. */ + timeout?: Duration + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ + timestamp_field?: Field + /** The Java time format of the timestamp field in the text. + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). + * Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. + * + * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+ * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ + timestamp_format?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { column_names?: never, delimiter?: never, documents_to_sample?: never, ecs_compatibility?: never, explain?: never, field?: never, format?: never, grok_pattern?: never, index?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { column_names?: never, delimiter?: never, documents_to_sample?: never, ecs_compatibility?: never, explain?: never, field?: never, format?: never, grok_pattern?: never, index?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never } +} + +export interface TextStructureFindFieldStructureResponse { + charset: string + ecs_compatibility?: TextStructureEcsCompatibilityType + field_stats: Record + format: TextStructureFormatType + grok_pattern?: GrokPattern + java_timestamp_formats?: string[] + joda_timestamp_formats?: string[] + ingest_pipeline: IngestPipelineConfig + mappings: MappingTypeMapping + multiline_start_pattern?: string + need_client_timezone: boolean + num_lines_analyzed: integer + num_messages_analyzed: integer + sample_start: string + timestamp_field?: Field +} + +export interface TextStructureFindMessageStructureRequest extends RequestBase { + /** If the format is `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header role, columns are named "column1", "column2", "column3", for example. */ + column_names?: string | string[] + /** If you the format is `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ + delimiter?: string + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. */ + ecs_compatibility?: TextStructureEcsCompatibilityType + /** If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */ + explain?: boolean + /** The high level structure of the text. + * By default, the API chooses the format. 
+ * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ + format?: TextStructureFormatType + /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ + grok_pattern?: GrokPattern + /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ + quote?: string + /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. + * Otherwise, the default value is `false`. */ + should_trim_fields?: boolean + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires, it will be stopped. */ + timeout?: Duration + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ + timestamp_field?: Field + /** The Java time format of the timestamp field in the text. + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). + * Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. 
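+ *
+ * As an illustrative sketch only (it assumes a connected `@elastic/elasticsearch`
+ * `Client` instance named `client`, and the log line is made up), a format
+ * override could be passed like this:
+ *
+ *     const res = await client.textStructure.findMessageStructure({
+ *       messages: ['2024-05-01 00:00:00,123 INFO service started'],
+ *       timestamp_format: 'yyyy-MM-dd HH:mm:ss,SSS'
+ *     })
+ *     console.log(res.grok_pattern, res.java_timestamp_formats)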
+ *
+ * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+ * Another is when the timestamp format is one that the structure finder does not consider by default.
+ *
+ * If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+ *
+ * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+ * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */
+ timestamp_format?: string
+ /** The list of messages you want to analyze. */
+ messages: string[]
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, messages?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, messages?: never }
+}
+
+export interface TextStructureFindMessageStructureResponse {
+ charset: string
+ ecs_compatibility?: TextStructureEcsCompatibilityType
+ field_stats: Record<Field, TextStructureFieldStat>
+ format: TextStructureFormatType
+ grok_pattern?: GrokPattern
+ java_timestamp_formats?: string[]
+ joda_timestamp_formats?: string[]
+ ingest_pipeline: IngestPipelineConfig
+ mappings: MappingTypeMapping
+ multiline_start_pattern?: string
+ need_client_timezone: boolean
+ num_lines_analyzed: integer
+ num_messages_analyzed: integer
+ sample_start: string
+ timestamp_field?: Field
+}
+
+export interface TextStructureFindStructureRequest<TJsonDocument = unknown> {
+ /** The text's character set.
+ * It must be a character set that is supported by the JVM that Elasticsearch uses.
+ * For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`.
+ * If this parameter is not specified, the structure finder chooses an appropriate character set. */
+ charset?: string
+ /** If you have set format to `delimited`, you can specify the column names in a comma-separated list.
+ * If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+ * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */
+ column_names?: string
+ /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row.
+ * Only a single character is supported; the delimiter cannot have multiple characters.
+ * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+ * In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+ * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */
+ delimiter?: string
+ /** The mode of compatibility with ECS compliant Grok patterns.
+ * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+ * Valid values are `disabled` and `v1`.
+ * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+ * If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings renames these fields before using them. */
+ ecs_compatibility?: string
+ /** If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result.
+ * If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. */
+ explain?: boolean
+ /** The high level structure of the text.
+ * Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`.
+ * By default, the API chooses the format.
+ * In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+ * If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */
+ format?: string
+ /** If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+ * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter.
+ * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp".
+ * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */
+ grok_pattern?: GrokPattern
+ /** If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text.
+ * If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. */
+ has_header_row?: boolean
+ /** The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text.
+ * If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. */
+ line_merge_size_limit?: uint
+ /** The number of lines to include in the structural analysis, starting from the beginning of the text.
+ * The minimum is 2.
+ * If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines.
+ *
+ * NOTE: The number of lines and the variation of the lines affect the speed of the analysis.
+ * For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample.
+ * If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. */
+ lines_to_sample?: uint
+ /** If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character.
+ * Only a single character is supported.
+ * If this parameter is not specified, the default value is a double quote (`"`).
+ * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */
+ quote?: string
+ /** If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them.
+ * If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`.
+ * Otherwise, the default value is `false`. */
+ should_trim_fields?: boolean
+ /** The maximum amount of time that the structure analysis can take.
+ * If the analysis is still running when the timeout expires then it will be stopped. */
+ timeout?: Duration
+ /** The name of the field that contains the primary timestamp of each record in the text.
+ * In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.
+ *
+ * If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`.
+ * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+ *
+ * For structured text, if you specify this parameter, the field must exist within the text.
+ *
+ * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field.
+ * For structured text, it is not compulsory to have a timestamp in the text. */
+ timestamp_field?: Field
+ /** The Java time format of the timestamp field in the text.
+ *
+ * Only a subset of Java time format letter groups are supported:
+ *
+ * * `a`
+ * * `d`
+ * * `dd`
+ * * `EEE`
+ * * `EEEE`
+ * * `H`
+ * * `HH`
+ * * `h`
+ * * `M`
+ * * `MM`
+ * * `MMM`
+ * * `MMMM`
+ * * `mm`
+ * * `ss`
+ * * `XX`
+ * * `XXX`
+ * * `yy`
+ * * `yyyy`
+ * * `zzz`
+ *
+ * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a `.`, `,` or `:`.
+ * Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes.
+ * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+ *
+ * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+ * Another is when the timestamp format is one that the structure finder does not consider by default.
+ *
+ * If this parameter is not specified, the structure finder chooses the best format from a built-in set.
+ *
+ * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+ * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */
+ timestamp_format?: string
+ text_files?: TJsonDocument[]
+ /** All values in `body` will be added to the request body.
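+ *
+ * A minimal usage sketch (illustrative only; it assumes a connected
+ * `@elastic/elasticsearch` `Client` instance named `client`, and the sample
+ * documents are hypothetical):
+ *
+ *     const response = await client.textStructure.findStructure({
+ *       lines_to_sample: 100,
+ *       text_files: [
+ *         { ts: '2024-05-01T00:00:00Z', message: 'service started' },
+ *         { ts: '2024-05-01T00:00:05Z', message: 'request handled' }
+ *       ]
+ *     })
+ *     console.log(response.format, response.mappings)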
*/ + body?: string | { [key: string]: any } & { charset?: never, column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, has_header_row?: never, line_merge_size_limit?: never, lines_to_sample?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, text_files?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { charset?: never, column_names?: never, delimiter?: never, ecs_compatibility?: never, explain?: never, format?: never, grok_pattern?: never, has_header_row?: never, line_merge_size_limit?: never, lines_to_sample?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never, text_files?: never } +} + +export interface TextStructureFindStructureResponse { + /** The character encoding used to parse the text. */ + charset: string + has_header_row?: boolean + /** For UTF character encodings, it indicates whether the text begins with a byte order marker. */ + has_byte_order_marker: boolean + /** Valid values include `ndjson`, `xml`, `delimited`, and `semi_structured_text`. */ + format: string + /** The most common values of each field, plus basic numeric statistics for the numeric `page_count` field. + * This information may provide clues that the data needs to be cleaned or transformed prior to use by other Elastic Stack functionality. */ + field_stats: Record + /** The first two messages in the text verbatim. + * This may help diagnose parse errors or accidental uploads of the wrong text. */ + sample_start: string + /** The number of distinct messages the lines contained. + * For NDJSON, this value is the same as `num_lines_analyzed`. + * For other text formats, messages can span several lines. */ + num_messages_analyzed: integer + /** Some suitable mappings for an index into which the data could be ingested. */ + mappings: MappingTypeMapping + quote?: string + delimiter?: string + /** If a timestamp format is detected that does not include a timezone, `need_client_timezone` is `true`. + * The server that parses the text must therefore be told the correct timezone by the client. */ + need_client_timezone: boolean + /** The number of lines of the text that were analyzed. */ + num_lines_analyzed: integer + /** If `format` is `delimited`, the `column_names` field lists the column names in the order they appear in the sample. */ + column_names?: string[] + explanation?: string[] + grok_pattern?: GrokPattern + multiline_start_pattern?: string + exclude_lines_pattern?: string + /** The Java time formats recognized in the time fields. + * Elasticsearch mappings and ingest pipelines use this format. */ + java_timestamp_formats?: string[] + /** Information that is used to tell Logstash how to parse timestamps. */ + joda_timestamp_formats?: string[] + /** The field considered most likely to be the primary timestamp of each document. */ + timestamp_field?: Field + should_trim_fields?: boolean + ingest_pipeline: IngestPipelineConfig +} + +export interface TextStructureTestGrokPatternMatchedField { + match: string + offset: integer + length: integer +} + +export interface TextStructureTestGrokPatternMatchedText { + matched: boolean + fields?: Record +} + +export interface TextStructureTestGrokPatternRequest extends RequestBase { + /** The mode of compatibility with ECS compliant Grok patterns. 
+ * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * Valid values are `disabled` and `v1`. */ + ecs_compatibility?: string + /** The Grok pattern to run on the text. */ + grok_pattern: GrokPattern + /** The lines of text to run the Grok pattern on. */ + text: string[] + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { ecs_compatibility?: never, grok_pattern?: never, text?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { ecs_compatibility?: never, grok_pattern?: never, text?: never } +} + +export interface TextStructureTestGrokPatternResponse { + matches: TextStructureTestGrokPatternMatchedText[] +} + +export interface TransformDestination { + /** The destination index for the transform. The mappings of the destination index are deduced based on the source + * fields when possible. If alternate mappings are required, use the create index API prior to starting the + * transform. */ + index?: IndexName + /** The unique identifier for an ingest pipeline. */ + pipeline?: string +} + +export interface TransformLatest { + /** Specifies the date field that is used to identify the latest documents. */ + sort: Field + /** Specifies an array of one or more fields that are used to group the data. */ + unique_key: Field[] +} + +export interface TransformPivot { + /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket + * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, + * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted + * average. */ + aggregations?: Record + /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket + * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, + * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted + * average. + * @alias aggregations */ + aggs?: Record + /** Defines how to group the data. More than one grouping can be defined per pivot. The following groupings are + * currently supported: date histogram, geotile grid, histogram, terms. */ + group_by?: Record +} + +export interface TransformPivotGroupByContainer { + date_histogram?: AggregationsDateHistogramAggregation + geotile_grid?: AggregationsGeoTileGridAggregation + histogram?: AggregationsHistogramAggregation + terms?: AggregationsTermsAggregation +} + +export interface TransformRetentionPolicy { + /** The date field that is used to calculate the age of the document. */ + field: Field + /** Specifies the maximum age of a document in the destination index. Documents that are older than the configured + * value are removed from the destination index. */ + max_age: Duration +} + +export interface TransformRetentionPolicyContainer { + /** Specifies that the transform uses a time field to set the retention policy. */ + time?: TransformRetentionPolicy +} + +export interface TransformSettings { + /** Specifies whether the transform checkpoint ranges should be optimized for performance. 
Such optimization can align
+ * checkpoint ranges with the date histogram interval when date histogram is specified as a group source in the
+ * transform config. As a result, fewer document updates are performed in the destination index, thus improving
+ * overall performance. */
+ align_checkpoints?: boolean
+ /** Defines if dates in the output should be written as ISO formatted string or as millis since epoch. epoch_millis was
+ * the default for transforms created before version 7.11. For compatible output set this value to `true`. */
+ dates_as_epoch_millis?: boolean
+ /** Specifies whether the transform should deduce the destination index mappings from the transform configuration. */
+ deduce_mappings?: boolean
+ /** Specifies a limit on the number of input documents per second. This setting throttles the transform by adding a
+ * wait time between search requests. The default value is null, which disables throttling. */
+ docs_per_second?: float
+ /** Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker
+ * exceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the
+ * maximum is `65,536`. */
+ max_page_search_size?: integer
+ /** Specifies whether the transform checkpoint will use the Point In Time API while searching over the source index.
+ * In general, Point In Time is an optimization that will reduce pressure on the source index by reducing the amount
+ * of refreshes and merges, but it can be expensive if a large number of Point In Times are opened and closed for a
+ * given index. The benefits and impact depend on the data being searched, the ingest rate into the source index, and
+ * the amount of other consumers searching the same source index. */
+ use_point_in_time?: boolean
+ /** If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case
+ * of an error, which means the transform never fails. Setting the number of retries other than infinite fails in
+ * validation. */
+ unattended?: boolean
+}
+
+export interface TransformSource {
+ /** The source indices for the transform. It can be a single index, an index pattern (for example, `"my-index-*"`), an
+ * array of indices (for example, `["my-index-000001", "my-index-000002"]`), or an array of index patterns (for
+ * example, `["my-index-*", "my-other-index-*"]`). For remote indices use the syntax `"remote_name:index_name"`. If
+ * any indices are in remote clusters then the master node and at least one transform node must have the `remote_cluster_client` node role. */
+ index: Indices
+ /** A query clause that retrieves a subset of data from the source index. */
+ query?: QueryDslQueryContainer
+ /** Definitions of search-time runtime fields that can be used by the transform. For search runtime fields all data
+ * nodes, including remote nodes, must be 7.12 or later. */
+ runtime_mappings?: MappingRuntimeFields
+}
+
+export interface TransformSyncContainer {
+ /** Specifies that the transform uses a time field to synchronize the source and destination indices. */
+ time?: TransformTimeSync
+}
+
+export interface TransformTimeSync {
+ /** The time delay between the current time and the latest input data time. */
+ delay?: Duration
+ /** The date field that is used to identify new documents in the source. In general, it’s a good idea to use a field
+ * that contains the ingest timestamp. If you use a different field, you might need to set the delay such that it
+ * accounts for data transmission delays. */
+ field: Field
+}
+
+export interface TransformDeleteTransformRequest extends RequestBase {
+ /** Identifier for the transform. */
+ transform_id: Id
+ /** If this value is false, the transform must be stopped before it can be deleted. If true, the transform is
+ * deleted regardless of its current state. */
+ force?: boolean
+ /** If this value is true, the destination index is deleted together with the transform. If false, the destination
+ * index will not be deleted. */
+ delete_dest_index?: boolean
+ /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */
+ timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { transform_id?: never, force?: never, delete_dest_index?: never, timeout?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { transform_id?: never, force?: never, delete_dest_index?: never, timeout?: never }
+}
+
+export type TransformDeleteTransformResponse = AcknowledgedResponseBase
+
+export interface TransformGetTransformRequest extends RequestBase {
+ /** Identifier for the transform. It can be a transform identifier or a
+ * wildcard expression. You can get information for all transforms by using
+ * `_all`, by specifying `*` as the `<transform_id>`, or by omitting the
+ * `<transform_id>`. */
+ transform_id?: Names
+ /** Specifies what to do when the request:
+ *
+ * 1. Contains wildcard expressions and there are no transforms that match.
+ * 2. Contains the _all string or no identifiers and there are no matches.
+ * 3. Contains wildcard expressions and there are only partial matches.
+ *
+ * If this parameter is false, the request returns a 404 status code when
+ * there are no matches or only partial matches. */
+ allow_no_match?: boolean
+ /** Skips the specified number of transforms. */
+ from?: integer
+ /** Specifies the maximum number of transforms to obtain. */
+ size?: integer
+ /** Excludes fields that were automatically added when creating the
+ * transform. This allows the configuration to be in an acceptable format to
+ * be retrieved and then added to another cluster. */
+ exclude_generated?: boolean
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never }
+}
+
+export interface TransformGetTransformResponse {
+ count: long
+ transforms: TransformGetTransformTransformSummary[]
+}
+
+export interface TransformGetTransformTransformSummary {
+ /** The security privileges that the transform uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the transform, this property is omitted. */
+ authorization?: MlTransformAuthorization
+ /** The time the transform was created. */
+ create_time?: EpochTime<UnitMillis>
+ create_time_string?: DateTime
+ /** Free text description of the transform. */
+ description?: string
+ /** The destination for the transform.
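+ *
+ * For orientation, a hedged retrieval sketch (assuming a connected
+ * `@elastic/elasticsearch` `Client` instance named `client`):
+ *
+ *     const { count, transforms } = await client.transform.getTransform({
+ *       transform_id: '*',
+ *       exclude_generated: true
+ *     })
+ *     console.log(count)
+ *     for (const t of transforms) console.log(t.id, t.dest.index)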
*/
+ dest: ReindexDestination
+ frequency?: Duration
+ id: Id
+ latest?: TransformLatest
+ /** The pivot method transforms the data by aggregating and grouping it. */
+ pivot?: TransformPivot
+ retention_policy?: TransformRetentionPolicyContainer
+ /** Defines optional transform settings. */
+ settings?: TransformSettings
+ /** The source of the data for the transform. */
+ source: TransformSource
+ /** Defines the properties transforms require to run continuously. */
+ sync?: TransformSyncContainer
+ /** The version of Elasticsearch that existed on the node when the transform was created. */
+ version?: VersionString
+ _meta?: Metadata
+}
+
+export interface TransformGetTransformStatsCheckpointStats {
+ checkpoint: long
+ checkpoint_progress?: TransformGetTransformStatsTransformProgress
+ timestamp?: DateTime
+ timestamp_millis?: EpochTime<UnitMillis>
+ time_upper_bound?: DateTime
+ time_upper_bound_millis?: EpochTime<UnitMillis>
+}
+
+export interface TransformGetTransformStatsCheckpointing {
+ changes_last_detected_at?: long
+ changes_last_detected_at_string?: DateTime
+ last: TransformGetTransformStatsCheckpointStats
+ next?: TransformGetTransformStatsCheckpointStats
+ operations_behind?: long
+ last_search_time?: long
+ last_search_time_string?: DateTime
+}
+
+export interface TransformGetTransformStatsRequest extends RequestBase {
+ /** Identifier for the transform. It can be a transform identifier or a
+ * wildcard expression. You can get information for all transforms by using
+ * `_all`, by specifying `*` as the `<transform_id>`, or by omitting the
+ * `<transform_id>`. */
+ transform_id: Names
+ /** Specifies what to do when the request:
+ *
+ * 1. Contains wildcard expressions and there are no transforms that match.
+ * 2. Contains the _all string or no identifiers and there are no matches.
+ * 3. Contains wildcard expressions and there are only partial matches.
+ *
+ * If this parameter is false, the request returns a 404 status code when
+ * there are no matches or only partial matches. */
+ allow_no_match?: boolean
+ /** Skips the specified number of transforms. */
+ from?: long
+ /** Specifies the maximum number of transforms to obtain. */
+ size?: long
+ /** Controls the time to wait for the stats. */
+ timeout?: Duration
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, timeout?: never }
+ /** All values in `querystring` will be added to the request querystring.
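+ *
+ * A monitoring sketch (assuming the `client` instance from the earlier
+ * sketches; the transform id is hypothetical):
+ *
+ *     const stats = await client.transform.getTransformStats({ transform_id: 'orders-by-customer' })
+ *     for (const t of stats.transforms) {
+ *       console.log(t.id, t.state, t.stats.documents_processed)
+ *     }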
*/
+ querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, timeout?: never }
+}
+
+export interface TransformGetTransformStatsResponse {
+ count: long
+ transforms: TransformGetTransformStatsTransformStats[]
+}
+
+export interface TransformGetTransformStatsTransformHealthIssue {
+ /** The type of the issue */
+ type: string
+ /** A description of the issue */
+ issue: string
+ /** Details about the issue */
+ details?: string
+ /** Number of times this issue has occurred since it started */
+ count: integer
+ /** The timestamp this issue occurred for the first time */
+ first_occurrence?: EpochTime<UnitMillis>
+ first_occurence_string?: DateTime
+}
+
+export interface TransformGetTransformStatsTransformIndexerStats {
+ delete_time_in_ms?: EpochTime<UnitMillis>
+ documents_indexed: long
+ documents_deleted?: long
+ documents_processed: long
+ exponential_avg_checkpoint_duration_ms: DurationValue<UnitFloatMillis>
+ exponential_avg_documents_indexed: double
+ exponential_avg_documents_processed: double
+ index_failures: long
+ index_time_in_ms: DurationValue<UnitMillis>
+ index_total: long
+ pages_processed: long
+ processing_time_in_ms: DurationValue<UnitMillis>
+ processing_total: long
+ search_failures: long
+ search_time_in_ms: DurationValue<UnitMillis>
+ search_total: long
+ trigger_count: long
+}
+
+export interface TransformGetTransformStatsTransformProgress {
+ docs_indexed: long
+ docs_processed: long
+ docs_remaining?: long
+ percent_complete?: double
+ total_docs?: long
+}
+
+export interface TransformGetTransformStatsTransformStats {
+ checkpointing: TransformGetTransformStatsCheckpointing
+ health?: TransformGetTransformStatsTransformStatsHealth
+ id: Id
+ /** @remarks This property is not supported on Elastic Cloud Serverless. */
+ node?: NodeAttributes
+ reason?: string
+ state: string
+ stats: TransformGetTransformStatsTransformIndexerStats
+}
+
+export interface TransformGetTransformStatsTransformStatsHealth {
+ status: HealthStatus
+ /** If a non-healthy status is returned, contains a list of issues of the transform. */
+ issues?: TransformGetTransformStatsTransformHealthIssue[]
+}
+
+export interface TransformPreviewTransformRequest extends RequestBase {
+ /** Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform
+ * configuration details in the request body. */
+ transform_id?: Id
+ /** Period to wait for a response. If no response is received before the
+ * timeout expires, the request fails and returns an error. */
+ timeout?: Duration
+ /** The destination for the transform. */
+ dest?: TransformDestination
+ /** Free text description of the transform. */
+ description?: string
+ /** The interval between checks for changes in the source indices when the
+ * transform is running continuously. Also determines the retry interval in
+ * the event of transient failures while the transform is searching or
+ * indexing. The minimum value is 1s and the maximum is 1h. */
+ frequency?: Duration
+ /** The pivot method transforms the data by aggregating and grouping it.
+ * These objects define the group by fields and the aggregation to reduce
+ * the data. */
+ pivot?: TransformPivot
+ /** The source of the data for the transform. */
+ source?: TransformSource
+ /** Defines optional transform settings. */
+ settings?: TransformSettings
+ /** Defines the properties transforms require to run continuously. */
+ sync?: TransformSyncContainer
+ /** Defines a retention policy for the transform.
Data that meets the defined + * criteria is deleted from the destination index. */ + retention_policy?: TransformRetentionPolicyContainer + /** The latest method transforms the data by finding the latest document for + * each unique key. */ + latest?: TransformLatest + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never, dest?: never, description?: never, frequency?: never, pivot?: never, source?: never, settings?: never, sync?: never, retention_policy?: never, latest?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, timeout?: never, dest?: never, description?: never, frequency?: never, pivot?: never, source?: never, settings?: never, sync?: never, retention_policy?: never, latest?: never } +} + +export interface TransformPreviewTransformResponse { + generated_dest_index: IndicesIndexState + preview: TTransform[] +} + +export interface TransformPutTransformRequest extends RequestBase { + /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ + transform_id: Id + /** When the transform is created, a series of validations occur to ensure its success. For example, there is a + * check for the existence of the source indices and a check that the destination index is not part of the source + * index pattern. You can use this parameter to skip the checks, for example when the source index does not exist + * until after the transform is created. The validations are always run when you start the transform, however, with + * the exception of privilege checks. */ + defer_validation?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The destination for the transform. */ + dest: TransformDestination + /** Free text description of the transform. */ + description?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. Also + * determines the retry interval in the event of transient failures while the transform is searching or indexing. + * The minimum value is `1s` and the maximum is `1h`. */ + frequency?: Duration + /** The latest method transforms the data by finding the latest document for each unique key. */ + latest?: TransformLatest + /** Defines optional transform metadata. */ + _meta?: Metadata + /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields + * and the aggregation to reduce the data. */ + pivot?: TransformPivot + /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the + * destination index. */ + retention_policy?: TransformRetentionPolicyContainer + /** Defines optional transform settings. */ + settings?: TransformSettings + /** The source of the data for the transform. */ + source: TransformSource + /** Defines the properties transforms require to run continuously. */ + sync?: TransformSyncContainer + /** All values in `body` will be added to the request body. 
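+ *
+ * A minimal creation sketch (illustrative only; the transform id, index names,
+ * and field names are hypothetical, and `client` is an assumed connected
+ * `@elastic/elasticsearch` `Client` instance):
+ *
+ *     await client.transform.putTransform({
+ *       transform_id: 'orders-by-customer',
+ *       source: { index: 'orders' },
+ *       dest: { index: 'orders-by-customer-dest' },
+ *       pivot: {
+ *         group_by: { customer_id: { terms: { field: 'customer_id' } } },
+ *         aggregations: { total_spend: { sum: { field: 'amount' } } }
+ *       },
+ *       sync: { time: { field: '@timestamp', delay: '60s' } },
+ *       frequency: '5m'
+ *     })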
*/ + body?: string | { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, latest?: never, _meta?: never, pivot?: never, retention_policy?: never, settings?: never, source?: never, sync?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, latest?: never, _meta?: never, pivot?: never, retention_policy?: never, settings?: never, source?: never, sync?: never } +} + +export type TransformPutTransformResponse = AcknowledgedResponseBase + +export interface TransformResetTransformRequest extends RequestBase { + /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ + transform_id: Id + /** If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform + * must be stopped before it can be reset. */ + force?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, force?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, force?: never, timeout?: never } +} + +export type TransformResetTransformResponse = AcknowledgedResponseBase + +export interface TransformScheduleNowTransformRequest extends RequestBase { + /** Identifier for the transform. */ + transform_id: Id + /** Controls the time to wait for the scheduling to take place */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, timeout?: never } +} + +export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase + +export interface TransformSetUpgradeModeRequest extends RequestBase { + /** When `true`, it enables `upgrade_mode` which temporarily halts all + * transform tasks and prohibits new transform tasks from + * starting. */ + enabled?: boolean + /** The time to wait for the request to be completed. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { enabled?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { enabled?: never, timeout?: never } +} + +export type TransformSetUpgradeModeResponse = AcknowledgedResponseBase + +export interface TransformStartTransformRequest extends RequestBase { + /** Identifier for the transform. */ + transform_id: Id + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. 
Only applicable for continuous transforms. */ + from?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never, from?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, timeout?: never, from?: never } +} + +export type TransformStartTransformResponse = AcknowledgedResponseBase + +export interface TransformStopTransformRequest extends RequestBase { + /** Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. + * To stop all transforms, use `_all` or `*` as the identifier. */ + transform_id: Name + /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; + * contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there + * are only partial matches. + * + * If it is true, the API returns a successful acknowledgement message when there are no matches. When there are + * only partial matches, the API stops the appropriate transforms. + * + * If it is false, the request returns a 404 status code when there are no matches or only partial matches. */ + allow_no_match?: boolean + /** If it is true, the API forcefully stops the transforms. */ + force?: boolean + /** Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the + * timeout expires, the request returns a timeout exception. However, the request continues processing and + * eventually moves the transform to a STOPPED state. */ + timeout?: Duration + /** If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, + * the transform stops as soon as possible. */ + wait_for_checkpoint?: boolean + /** If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns + * immediately and the indexer is stopped asynchronously in the background. */ + wait_for_completion?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, force?: never, timeout?: never, wait_for_checkpoint?: never, wait_for_completion?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, allow_no_match?: never, force?: never, timeout?: never, wait_for_checkpoint?: never, wait_for_completion?: never } +} + +export type TransformStopTransformResponse = AcknowledgedResponseBase + +export interface TransformUpdateTransformRequest extends RequestBase { + /** Identifier for the transform. */ + transform_id: Id + /** When true, deferrable validations are not run. This behavior may be + * desired if the source index does not exist until after the transform is + * created. */ + defer_validation?: boolean + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The destination for the transform. */ + dest?: TransformDestination + /** Free text description of the transform. */ + description?: string + /** The interval between checks for changes in the source indices when the + * transform is running continuously. 
Also determines the retry interval in + * the event of transient failures while the transform is searching or + * indexing. The minimum value is 1s and the maximum is 1h. */ + frequency?: Duration + /** Defines optional transform metadata. */ + _meta?: Metadata + /** The source of the data for the transform. */ + source?: TransformSource + /** Defines optional transform settings. */ + settings?: TransformSettings + /** Defines the properties transforms require to run continuously. */ + sync?: TransformSyncContainer + /** Defines a retention policy for the transform. Data that meets the defined + * criteria is deleted from the destination index. */ + retention_policy?: TransformRetentionPolicyContainer | null + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, _meta?: never, source?: never, settings?: never, sync?: never, retention_policy?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, _meta?: never, source?: never, settings?: never, sync?: never, retention_policy?: never } +} + +export interface TransformUpdateTransformResponse { + authorization?: MlTransformAuthorization + create_time: long + description: string + dest: ReindexDestination + frequency?: Duration + id: Id + latest?: TransformLatest + pivot?: TransformPivot + retention_policy?: TransformRetentionPolicyContainer + settings: TransformSettings + source: ReindexSource + sync?: TransformSyncContainer + version: VersionString + _meta?: Metadata +} + +export interface TransformUpgradeTransformsRequest extends RequestBase { + /** When true, the request checks for updates but does not run them. */ + dry_run?: boolean + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and + * returns an error. */ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { dry_run?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { dry_run?: never, timeout?: never } +} + +export interface TransformUpgradeTransformsResponse { + /** The number of transforms that need to be upgraded. */ + needs_update: integer + /** The number of transforms that don’t require upgrading. */ + no_action: integer + /** The number of transforms that have been upgraded. 
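+ *
+ * For example, a dry-run upgrade check could look like this (a sketch, with
+ * the same assumed `client` instance):
+ *
+ *     const res = await client.transform.upgradeTransforms({ dry_run: true })
+ *     console.log(res.needs_update, res.no_action, res.updated)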
*/
+ updated: integer
+}
+
+export interface WatcherAcknowledgeState {
+ state: WatcherAcknowledgementOptions
+ timestamp: DateTime
+}
+
+export type WatcherAcknowledgementOptions = 'awaits_successful_execution' | 'ackable' | 'acked'
+
+export interface WatcherAction {
+ action_type?: WatcherActionType
+ condition?: WatcherConditionContainer
+ foreach?: string
+ max_iterations?: integer
+ name?: Name
+ throttle_period?: Duration
+ throttle_period_in_millis?: DurationValue<UnitMillis>
+ transform?: TransformContainer
+ index?: WatcherIndexAction
+ logging?: WatcherLoggingAction
+ email?: WatcherEmailAction
+ pagerduty?: WatcherPagerDutyAction
+ slack?: WatcherSlackAction
+ webhook?: WatcherWebhookAction
+}
+
+export type WatcherActionExecutionMode = 'simulate' | 'force_simulate' | 'execute' | 'force_execute' | 'skip'
+
+export interface WatcherActionStatus {
+ ack: WatcherAcknowledgeState
+ last_execution?: WatcherExecutionState
+ last_successful_execution?: WatcherExecutionState
+ last_throttle?: WatcherThrottleState
+}
+
+export type WatcherActionStatusOptions = 'success' | 'failure' | 'simulated' | 'throttled'
+
+export type WatcherActionType = 'email' | 'webhook' | 'index' | 'logging' | 'slack' | 'pagerduty'
+
+export type WatcherActions = Record<IndexName, WatcherActionStatus>
+
+export interface WatcherActivationState {
+ active: boolean
+ timestamp: DateTime
+}
+
+export interface WatcherActivationStatus {
+ actions: WatcherActions
+ state: WatcherActivationState
+ version: VersionNumber
+}
+
+export interface WatcherAlwaysCondition {
+}
+
+export interface WatcherArrayCompareConditionKeys {
+ path: string
+}
+export type WatcherArrayCompareCondition = WatcherArrayCompareConditionKeys
+& { [property: string]: WatcherArrayCompareOpParams | string }
+
+export interface WatcherArrayCompareOpParams {
+ quantifier: WatcherQuantifier
+ value: FieldValue
+}
+
+export interface WatcherChainInput {
+ inputs: Partial<Record<string, WatcherInputContainer>>[]
+}
+
+export interface WatcherConditionContainer {
+ always?: WatcherAlwaysCondition
+ array_compare?: Partial<Record<string, WatcherArrayCompareCondition>>
+ compare?: Partial<Record<string, Partial<Record<WatcherConditionOp, FieldValue>>>>
+ never?: WatcherNeverCondition
+ script?: WatcherScriptCondition
+}
+
+export type WatcherConditionOp = 'not_eq' | 'eq' | 'lt' | 'gt' | 'lte' | 'gte'
+
+export type WatcherConditionType = 'always' | 'never' | 'script' | 'compare' | 'array_compare'
+
+export type WatcherConnectionScheme = 'http' | 'https'
+
+export type WatcherCronExpression = string
+
+export interface WatcherDailySchedule {
+ at: WatcherScheduleTimeOfDay[]
+}
+
+export type WatcherDataAttachmentFormat = 'json' | 'yaml'
+
+export interface WatcherDataEmailAttachment {
+ format?: WatcherDataAttachmentFormat
+}
+
+export type WatcherDay = 'sunday' | 'monday' | 'tuesday' | 'wednesday' | 'thursday' | 'friday' | 'saturday'
+
+export interface WatcherEmail {
+ id?: Id
+ bcc?: string | string[]
+ body?: WatcherEmailBody
+ cc?: string | string[]
+ from?: string
+ priority?: WatcherEmailPriority
+ reply_to?: string | string[]
+ sent_date?: DateTime
+ subject: string
+ to: string | string[]
+ attachments?: Record<string, WatcherEmailAttachmentContainer>
+}
+
+export interface WatcherEmailAction extends WatcherEmail {
+}
+
+export interface WatcherEmailAttachmentContainer {
+ http?: WatcherHttpEmailAttachment
+ reporting?: WatcherReportingEmailAttachment
+ data?: WatcherDataEmailAttachment
+}
+
+export interface WatcherEmailBody {
+ html?: string
+ text?: string
+}
+
+export type WatcherEmailPriority = 'lowest' | 'low' | 'normal' | 'high' | 'highest'
+
+export interface WatcherEmailResult {
+ account?: string
+ message: WatcherEmail
+ reason?: string
+}
+
+export type
WatcherExecutionPhase = 'awaits_execution' | 'started' | 'input' | 'condition' | 'actions' | 'watch_transform' | 'aborted' | 'finished' + +export interface WatcherExecutionResult { + actions: WatcherExecutionResultAction[] + condition: WatcherExecutionResultCondition + execution_duration: DurationValue + execution_time: DateTime + input: WatcherExecutionResultInput +} + +export interface WatcherExecutionResultAction { + email?: WatcherEmailResult + id: Id + index?: WatcherIndexResult + logging?: WatcherLoggingResult + pagerduty?: WatcherPagerDutyResult + reason?: string + slack?: WatcherSlackResult + status: WatcherActionStatusOptions + type: WatcherActionType + webhook?: WatcherWebhookResult + error?: ErrorCause +} + +export interface WatcherExecutionResultCondition { + met: boolean + status: WatcherActionStatusOptions + type: WatcherConditionType +} + +export interface WatcherExecutionResultInput { + payload: Record + status: WatcherActionStatusOptions + type: WatcherInputType +} + +export interface WatcherExecutionState { + successful: boolean + timestamp: DateTime + reason?: string +} + +export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' + +export interface WatcherExecutionThreadPool { + /** The largest size of the execution thread pool, which indicates the largest number of concurrent running watches. */ + max_size: long + /** The number of watches that were triggered and are currently queued. */ + queue_size: long +} + +export interface WatcherHourAndMinute { + hour: integer[] + minute: integer[] +} + +export interface WatcherHourlySchedule { + minute: integer[] +} + +export interface WatcherHttpEmailAttachment { + content_type?: string + inline?: boolean + request?: WatcherHttpInputRequestDefinition +} + +export interface WatcherHttpInput { + extract?: string[] + request?: WatcherHttpInputRequestDefinition + response_content_type?: WatcherResponseContentType +} + +export interface WatcherHttpInputAuthentication { + basic: WatcherHttpInputBasicAuthentication +} + +export interface WatcherHttpInputBasicAuthentication { + password: Password + username: Username +} + +export type WatcherHttpInputMethod = 'head' | 'get' | 'post' | 'put' | 'delete' + +export interface WatcherHttpInputProxy { + host: Host + port: uint +} + +export interface WatcherHttpInputRequestDefinition { + auth?: WatcherHttpInputAuthentication + body?: string + connection_timeout?: Duration + headers?: Record + host?: Host + method?: WatcherHttpInputMethod + params?: Record + path?: string + port?: uint + proxy?: WatcherHttpInputProxy + read_timeout?: Duration + scheme?: WatcherConnectionScheme + url?: string +} + +export interface WatcherHttpInputRequestResult extends WatcherHttpInputRequestDefinition { +} + +export interface WatcherHttpInputResponseResult { + body: string + headers: HttpHeaders + status: integer +} + +export interface WatcherIndexAction { + index: IndexName + doc_id?: Id + refresh?: Refresh + op_type?: OpType + timeout?: Duration + execution_time_field?: Field +} + +export interface WatcherIndexResult { + response: WatcherIndexResultSummary +} + +export interface WatcherIndexResultSummary { + created: boolean + id: Id + index: IndexName + result: Result + version: VersionNumber +} + +export interface WatcherInputContainer { + chain?: WatcherChainInput + http?: WatcherHttpInput + search?: WatcherSearchInput + simple?: Record +} + +export type WatcherInputType = 
'http' | 'search' | 'simple' + +export interface WatcherLoggingAction { + level?: string + text: string + category?: string +} + +export interface WatcherLoggingResult { + logged_text: string +} + +export type WatcherMonth = 'january' | 'february' | 'march' | 'april' | 'may' | 'june' | 'july' | 'august' | 'september' | 'october' | 'november' | 'december' + +export interface WatcherNeverCondition { +} + +export interface WatcherPagerDutyAction extends WatcherPagerDutyEvent { +} + +export interface WatcherPagerDutyContext { + href?: string + src?: string + type: WatcherPagerDutyContextType +} + +export type WatcherPagerDutyContextType = 'link' | 'image' + +export interface WatcherPagerDutyEvent { + account?: string + attach_payload: boolean + client?: string + client_url?: string + contexts?: WatcherPagerDutyContext[] + /** @alias contexts */ + context?: WatcherPagerDutyContext[] + description: string + event_type?: WatcherPagerDutyEventType + incident_key: string + proxy?: WatcherPagerDutyEventProxy +} + +export interface WatcherPagerDutyEventProxy { + host?: Host + port?: integer +} + +export type WatcherPagerDutyEventType = 'trigger' | 'resolve' | 'acknowledge' + +export interface WatcherPagerDutyResult { + event: WatcherPagerDutyEvent + reason?: string + request?: WatcherHttpInputRequestResult + response?: WatcherHttpInputResponseResult +} + +export type WatcherQuantifier = 'some' | 'all' + +export interface WatcherQueryWatch { + _id: Id + status?: WatcherWatchStatus + watch?: WatcherWatch + _primary_term?: integer + _seq_no?: SequenceNumber +} + +export interface WatcherReportingEmailAttachment { + url: string + inline?: boolean + retries?: integer + interval?: Duration + request?: WatcherHttpInputRequestDefinition +} + +export type WatcherResponseContentType = 'json' | 'yaml' | 'text' + +export interface WatcherScheduleContainer { + timezone?: string + cron?: WatcherCronExpression + daily?: WatcherDailySchedule + hourly?: WatcherHourlySchedule + interval?: Duration + monthly?: WatcherTimeOfMonth | WatcherTimeOfMonth[] + weekly?: WatcherTimeOfWeek | WatcherTimeOfWeek[] + yearly?: WatcherTimeOfYear | WatcherTimeOfYear[] +} + +export type WatcherScheduleTimeOfDay = string | WatcherHourAndMinute + +export interface WatcherScheduleTriggerEvent { + scheduled_time: DateTime + triggered_time?: DateTime +} + +export interface WatcherScriptCondition { + lang?: ScriptLanguage + params?: Record + source?: ScriptSource + id?: string +} + +export interface WatcherSearchInput { + extract?: string[] + request: WatcherSearchInputRequestDefinition + timeout?: Duration +} + +export interface WatcherSearchInputRequestBody { + query: QueryDslQueryContainer +} + +export interface WatcherSearchInputRequestDefinition { + body?: WatcherSearchInputRequestBody + indices?: IndexName[] + indices_options?: IndicesOptions + search_type?: SearchType + template?: WatcherSearchTemplateRequestBody + rest_total_hits_as_int?: boolean +} + +export interface WatcherSearchTemplateRequestBody { + explain?: boolean + /** ID of the search template to use. If no source is specified, + * this parameter is required. */ + id?: Id + params?: Record + profile?: boolean + /** An inline search template. Supports the same parameters as the search API's + * request body. Also supports Mustache variables. If no id is specified, this + * parameter is required. 
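+ * @example
+ * A minimal illustration of an inline template with Mustache variables
+ * (the field and value names are hypothetical):
+ * ```ts
+ * const template = {
+ *   source: '{"query":{"match":{"{{my_field}}":"{{my_value}}"}}}',
+ *   params: { my_field: 'message', my_value: 'error' }
+ * }
+ * ```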
*/ + source?: string +} + +export interface WatcherSimulatedActions { + actions: string[] + all: WatcherSimulatedActions + use_all: boolean +} + +export interface WatcherSlackAction { + account?: string + message: WatcherSlackMessage +} + +export interface WatcherSlackAttachment { + author_icon?: string + author_link?: string + author_name: string + color?: string + fallback?: string + fields?: WatcherSlackAttachmentField[] + footer?: string + footer_icon?: string + image_url?: string + pretext?: string + text?: string + thumb_url?: string + title: string + title_link?: string + ts?: EpochTime +} + +export interface WatcherSlackAttachmentField { + short: boolean + title: string + value: string +} + +export interface WatcherSlackDynamicAttachment { + attachment_template: WatcherSlackAttachment + list_path: string +} + +export interface WatcherSlackMessage { + attachments: WatcherSlackAttachment[] + dynamic_attachments?: WatcherSlackDynamicAttachment + from: string + icon?: string + text: string + to: string[] +} + +export interface WatcherSlackResult { + account?: string + message: WatcherSlackMessage +} + +export interface WatcherThrottleState { + reason: string + timestamp: DateTime +} + +export interface WatcherTimeOfMonth { + at: string[] + on: integer[] +} + +export interface WatcherTimeOfWeek { + at: string[] + on: WatcherDay[] +} + +export interface WatcherTimeOfYear { + at: string[] + int: WatcherMonth[] + on: integer[] +} + +export interface WatcherTriggerContainer { + schedule?: WatcherScheduleContainer +} + +export interface WatcherTriggerEventContainer { + schedule?: WatcherScheduleTriggerEvent +} + +export interface WatcherTriggerEventResult { + manual: WatcherTriggerEventContainer + triggered_time: DateTime + type: string +} + +export interface WatcherWatch { + actions: Record + condition: WatcherConditionContainer + input: WatcherInputContainer + metadata?: Metadata + status?: WatcherWatchStatus + throttle_period?: Duration + throttle_period_in_millis?: DurationValue + transform?: TransformContainer + trigger: WatcherTriggerContainer +} + +export interface WatcherWatchStatus { + actions: WatcherActions + last_checked?: DateTime + last_met_condition?: DateTime + state: WatcherActivationState + version: VersionNumber + execution_state?: string +} + +export interface WatcherWebhookAction extends WatcherHttpInputRequestDefinition { +} + +export interface WatcherWebhookResult { + request: WatcherHttpInputRequestResult + response?: WatcherHttpInputResponseResult +} + +export interface WatcherAckWatchRequest extends RequestBase { + /** The watch identifier. */ + watch_id: Name + /** A comma-separated list of the action identifiers to acknowledge. + * If you omit this parameter, all of the actions of the watch are acknowledged. */ + action_id?: Names + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { watch_id?: never, action_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { watch_id?: never, action_id?: never } +} + +export interface WatcherAckWatchResponse { + status: WatcherWatchStatus +} + +export interface WatcherActivateWatchRequest extends RequestBase { + /** The watch identifier. */ + watch_id: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { watch_id?: never } + /** All values in `querystring` will be added to the request querystring. 
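+ * @example
+ * Extra keys are passed through to the query string; a hedged sketch
+ * (the `pretty` flag is just an illustration):
+ * ```ts
+ * await client.watcher.activateWatch({
+ *   watch_id: 'my-watch',
+ *   querystring: { pretty: true }
+ * })
+ * ```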
*/ + querystring?: { [key: string]: any } & { watch_id?: never } +} + +export interface WatcherActivateWatchResponse { + status: WatcherActivationStatus +} + +export interface WatcherDeactivateWatchRequest extends RequestBase { + /** The watch identifier. */ + watch_id: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { watch_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { watch_id?: never } +} + +export interface WatcherDeactivateWatchResponse { + status: WatcherActivationStatus +} + +export interface WatcherDeleteWatchRequest extends RequestBase { + /** The watch identifier. */ + id: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface WatcherDeleteWatchResponse { + found: boolean + _id: Id + _version: VersionNumber +} + +export interface WatcherExecuteWatchRequest extends RequestBase { + /** The watch identifier. */ + id?: Id + /** Defines whether the watch runs in debug mode. */ + debug?: boolean + /** Determines how to handle the watch actions as part of the watch execution. */ + action_modes?: Record + /** When present, the watch uses this object as a payload instead of executing its own input. */ + alternative_input?: Record + /** When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. */ + ignore_condition?: boolean + /** When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. + * In addition, the status of the watch is updated, possibly throttling subsequent runs. + * This can also be specified as an HTTP parameter. */ + record_execution?: boolean + simulated_actions?: WatcherSimulatedActions + /** This structure is parsed as the data of the trigger event that will be used during the watch execution. */ + trigger_data?: WatcherScheduleTriggerEvent + /** When present, this watch is used instead of the one specified in the request. + * This watch is not persisted to the index and `record_execution` cannot be set. */ + watch?: WatcherWatch + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never, debug?: never, action_modes?: never, alternative_input?: never, ignore_condition?: never, record_execution?: never, simulated_actions?: never, trigger_data?: never, watch?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, debug?: never, action_modes?: never, alternative_input?: never, ignore_condition?: never, record_execution?: never, simulated_actions?: never, trigger_data?: never, watch?: never } +} + +export interface WatcherExecuteWatchResponse { + /** The watch record identifier as it would be stored in the `.watcher-history` index. */ + _id: Id + /** The watch record document as it would be stored in the `.watcher-history` index. 
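+ * @example
+ * A minimal sketch of running a watch and inspecting the record (the
+ * watch ID and action name are hypothetical):
+ * ```ts
+ * const resp = await client.watcher.executeWatch({
+ *   id: 'my-watch',
+ *   action_modes: { log_errors: 'force_simulate' }
+ * })
+ * console.log(resp.watch_record.state)
+ * ```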
*/ + watch_record: WatcherExecuteWatchWatchRecord +} + +export interface WatcherExecuteWatchWatchRecord { + condition: WatcherConditionContainer + input: WatcherInputContainer + messages: string[] + metadata?: Metadata + node: string + result: WatcherExecutionResult + state: WatcherExecutionStatus + trigger_event: WatcherTriggerEventResult + user: Username + watch_id: Id + status?: WatcherWatchStatus +} + +export interface WatcherGetSettingsRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface WatcherGetSettingsResponse { + index: IndicesIndexSettings +} + +export interface WatcherGetWatchRequest extends RequestBase { + /** The watch identifier. */ + id: Name + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never } +} + +export interface WatcherGetWatchResponse { + found: boolean + _id: Id + status?: WatcherWatchStatus + watch?: WatcherWatch + _primary_term?: integer + _seq_no?: SequenceNumber + _version?: VersionNumber +} + +export interface WatcherPutWatchRequest extends RequestBase { + /** The identifier for the watch. */ + id: Id + /** The initial state of the watch. + * The default value is `true`, which means the watch is active by default. */ + active?: boolean + /** Only update the watch if the last operation that changed the watch has the specified primary term. */ + if_primary_term?: long + /** Only update the watch if the last operation that changed the watch has the specified sequence number. */ + if_seq_no?: SequenceNumber + /** Explicit version number for concurrency control. */ + version?: VersionNumber + /** The list of actions that will be run if the condition matches. */ + actions?: Record + /** The condition that defines if the actions should be run. */ + condition?: WatcherConditionContainer + /** The input that loads the data for the watch. */ + input?: WatcherInputContainer + /** Metadata JSON that will be copied into the history entries. */ + metadata?: Metadata + /** The minimum time between actions being run. + * The default is 5 seconds. + * This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. + * If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. */ + throttle_period?: Duration + /** Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the `throttle_period` parameter are specified, Watcher uses the last parameter included in the request. */ + throttle_period_in_millis?: DurationValue + /** The transform that processes the watch payload to prepare it for the watch actions. */ + transform?: TransformContainer + /** The trigger that defines when the watch should run. */ + trigger?: WatcherTriggerContainer + /** All values in `body` will be added to the request body. 
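+ * @example
+ * A minimal sketch of registering a watch; the watch ID, index, schedule,
+ * and action names below are illustrative, not defaults:
+ * ```ts
+ * await client.watcher.putWatch({
+ *   id: 'my-watch',
+ *   trigger: { schedule: { interval: '10m' } },
+ *   input: {
+ *     search: {
+ *       request: {
+ *         indices: ['logs'],
+ *         body: { query: { match: { level: 'error' } } }
+ *       }
+ *     }
+ *   },
+ *   condition: { always: {} },
+ *   actions: {
+ *     log_errors: { logging: { text: 'Errors found in logs' } }
+ *   }
+ * })
+ * ```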
*/ + body?: string | { [key: string]: any } & { id?: never, active?: never, if_primary_term?: never, if_seq_no?: never, version?: never, actions?: never, condition?: never, input?: never, metadata?: never, throttle_period?: never, throttle_period_in_millis?: never, transform?: never, trigger?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { id?: never, active?: never, if_primary_term?: never, if_seq_no?: never, version?: never, actions?: never, condition?: never, input?: never, metadata?: never, throttle_period?: never, throttle_period_in_millis?: never, transform?: never, trigger?: never } +} + +export interface WatcherPutWatchResponse { + created: boolean + _id: Id + _primary_term: long + _seq_no: SequenceNumber + _version: VersionNumber +} + +export interface WatcherQueryWatchesRequest extends RequestBase { + /** The offset from the first result to fetch. + * It must be non-negative. */ + from?: integer + /** The number of hits to return. + * It must be non-negative. */ + size?: integer + /** A query that filters the watches to be returned. */ + query?: QueryDslQueryContainer + /** One or more fields used to sort the search results. */ + sort?: Sort + /** Retrieve the next page of hits using a set of sort values from the previous page. */ + search_after?: SortResults + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { from?: never, size?: never, query?: never, sort?: never, search_after?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { from?: never, size?: never, query?: never, sort?: never, search_after?: never } +} + +export interface WatcherQueryWatchesResponse { + /** The total number of watches found. */ + count: integer + /** A list of watches based on the `from`, `size`, or `search_after` request body parameters. */ + watches: WatcherQueryWatch[] +} + +export interface WatcherStartRequest extends RequestBase { + /** Period to wait for a connection to the master node. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export type WatcherStartResponse = AcknowledgedResponseBase + +export interface WatcherStatsRequest extends RequestBase { + /** Defines which additional metrics are included in the response. */ + metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] + /** Defines whether stack traces are generated for each watch that is running. */ + emit_stacktraces?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { metric?: never, emit_stacktraces?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { metric?: never, emit_stacktraces?: never } +} + +export interface WatcherStatsResponse { + _nodes: NodeStatistics + cluster_name: Name + manually_stopped: boolean + stats: WatcherStatsWatcherNodeStats[] +} + +export interface WatcherStatsWatchRecordQueuedStats { + /** The time the watch was run. + * This is just before the input is being run. 
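+ * @example
+ * These records are returned by the stats API; a hedged sketch:
+ * ```ts
+ * const stats = await client.watcher.stats({ metric: 'queued_watches' })
+ * for (const node of stats.stats) {
+ *   console.log(node.node_id, node.queued_watches)
+ * }
+ * ```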
*/ + execution_time: DateTime +} + +export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { + /** The current watch execution phase. */ + execution_phase: WatcherExecutionPhase + /** The time the watch was triggered by the trigger engine. */ + triggered_time: DateTime + executed_actions?: string[] + watch_id: Id + /** The watch record identifier. */ + watch_record_id: Id +} + +export type WatcherStatsWatcherMetric = '_all' | 'all' | 'queued_watches' | 'current_watches' | 'pending_watches' + +export interface WatcherStatsWatcherNodeStats { + /** The current executing watches metric gives insight into the watches that are currently being executed by Watcher. + * Additional information is shared per watch that is currently executing. + * This information includes the `watch_id`, the time its execution started, and its current execution phase. + * To include this metric, the `metric` option should be set to `current_watches` or `_all`. + * In addition, you can specify the `emit_stacktraces=true` parameter, which adds stack traces for each watch that is being run. + * These stack traces can give you more insight into an execution of a watch. */ + current_watches?: WatcherStatsWatchRecordStats[] + execution_thread_pool: WatcherExecutionThreadPool + /** Watcher moderates the execution of watches such that their execution won't put too much pressure on the node and its resources. + * If too many watches trigger concurrently and there isn't enough capacity to run them all, some of the watches are queued, waiting for the currently running watches to finish. + * The queued watches metric gives insight into these queued watches. + * + * To include this metric, the `metric` option should include `queued_watches` or `_all`. */ + queued_watches?: WatcherStatsWatchRecordQueuedStats[] + /** The number of watches currently registered. */ + watch_count: long + /** The current state of Watcher. */ + watcher_state: WatcherStatsWatcherState + node_id: Id +} + +export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' + +export interface WatcherStopRequest extends RequestBase { + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never time out, set it to `-1`. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export type WatcherStopResponse = AcknowledgedResponseBase + +export interface WatcherUpdateSettingsRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ + master_timeout?: Duration + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + 'index.auto_expand_replicas'?: string + 'index.number_of_replicas'?: integer + /** All values in `body` will be added to the request body. 
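+ * @example
+ * A hedged sketch of adjusting the Watcher index settings (the replica
+ * count shown is illustrative):
+ * ```ts
+ * await client.watcher.updateSettings({ 'index.number_of_replicas': 1 })
+ * ```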
*/ + body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never, 'index.auto_expand_replicas'?: never, 'index.number_of_replicas'?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never, timeout?: never, 'index.auto_expand_replicas'?: never, 'index.number_of_replicas'?: never } +} + +export interface WatcherUpdateSettingsResponse { + acknowledged: boolean +} + +export interface XpackInfoBuildInformation { + date: DateTime + hash: string +} + +export interface XpackInfoFeature { + available: boolean + description?: string + enabled: boolean + native_code_info?: XpackInfoNativeCodeInformation +} + +export interface XpackInfoFeatures { + aggregate_metric: XpackInfoFeature + analytics: XpackInfoFeature + ccr: XpackInfoFeature + data_streams: XpackInfoFeature + data_tiers: XpackInfoFeature + enrich: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ + enterprise_search: XpackInfoFeature + eql: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ + esql: XpackInfoFeature + graph: XpackInfoFeature + ilm: XpackInfoFeature + logstash: XpackInfoFeature + logsdb: XpackInfoFeature + ml: XpackInfoFeature + monitoring: XpackInfoFeature + rollup: XpackInfoFeature + runtime_fields?: XpackInfoFeature + searchable_snapshots: XpackInfoFeature + security: XpackInfoFeature + slm: XpackInfoFeature + spatial: XpackInfoFeature + sql: XpackInfoFeature + transform: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ + universal_profiling: XpackInfoFeature + voting_only: XpackInfoFeature + watcher: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ + archive: XpackInfoFeature +} + +export interface XpackInfoMinimalLicenseInformation { + expiry_date_in_millis: EpochTime + mode: LicenseLicenseType + status: LicenseLicenseStatus + type: LicenseLicenseType + uid: string +} + +export interface XpackInfoNativeCodeInformation { + build_hash: string + version: VersionString +} + +export interface XpackInfoRequest extends RequestBase { + /** A comma-separated list of the information categories to include in the response. + * For example, `build,license,features`. */ + categories?: XpackInfoXPackCategory[] + /** If this param is used it must be set to true */ + accept_enterprise?: boolean + /** Defines whether additional human-readable information is included in the response. + * In particular, it adds descriptions and a tag line. */ + human?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { categories?: never, accept_enterprise?: never, human?: never } + /** All values in `querystring` will be added to the request querystring. 
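+ * @example
+ * A minimal sketch of fetching only selected categories:
+ * ```ts
+ * const info = await client.xpack.info({ categories: ['build', 'license'] })
+ * console.log(info.license.status)
+ * ```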
*/ + querystring?: { [key: string]: any } & { categories?: never, accept_enterprise?: never, human?: never } +} + +export interface XpackInfoResponse { + build: XpackInfoBuildInformation + features: XpackInfoFeatures + license: XpackInfoMinimalLicenseInformation + tagline: string +} + +export type XpackInfoXPackCategory = 'build' | 'features' | 'license' + +export interface XpackUsageAnalytics extends XpackUsageBase { + stats: XpackUsageAnalyticsStatistics +} + +export interface XpackUsageAnalyticsStatistics { + boxplot_usage: long + cumulative_cardinality_usage: long + string_stats_usage: long + top_metrics_usage: long + t_test_usage: long + moving_percentiles_usage: long + normalize_usage: long + rate_usage: long + multi_terms_usage?: long +} + +export interface XpackUsageArchive extends XpackUsageBase { + indices_count: long +} + +export interface XpackUsageAudit extends XpackUsageFeatureToggle { + outputs?: string[] +} + +export interface XpackUsageBase { + available: boolean + enabled: boolean +} + +export interface XpackUsageCcr extends XpackUsageBase { + auto_follow_patterns_count: integer + follower_indices_count: integer +} + +export interface XpackUsageCounter { + active: long + total: long +} + +export interface XpackUsageDataStreams extends XpackUsageBase { + data_streams: long + indices_count: long +} + +export interface XpackUsageDataTierPhaseStatistics { + node_count: long + index_count: long + total_shard_count: long + primary_shard_count: long + doc_count: long + total_size_bytes: long + primary_size_bytes: long + primary_shard_size_avg_bytes: long + primary_shard_size_median_bytes: long + primary_shard_size_mad_bytes: long +} + +export interface XpackUsageDataTiers extends XpackUsageBase { + data_warm: XpackUsageDataTierPhaseStatistics + data_frozen?: XpackUsageDataTierPhaseStatistics + data_cold: XpackUsageDataTierPhaseStatistics + data_content: XpackUsageDataTierPhaseStatistics + data_hot: XpackUsageDataTierPhaseStatistics +} + +export interface XpackUsageDatafeed { + count: long +} + +export interface XpackUsageEql extends XpackUsageBase { + features: XpackUsageEqlFeatures + queries: Record +} + +export interface XpackUsageEqlFeatures { + join: uint + joins: XpackUsageEqlFeaturesJoin + keys: XpackUsageEqlFeaturesKeys + event: uint + pipes: XpackUsageEqlFeaturesPipes + sequence: uint + sequences: XpackUsageEqlFeaturesSequences +} + +export interface XpackUsageEqlFeaturesJoin { + join_queries_two: uint + join_queries_three: uint + join_until: uint + join_queries_five_or_more: uint + join_queries_four: uint +} + +export interface XpackUsageEqlFeaturesKeys { + join_keys_two: uint + join_keys_one: uint + join_keys_three: uint + join_keys_five_or_more: uint + join_keys_four: uint +} + +export interface XpackUsageEqlFeaturesPipes { + pipe_tail: uint + pipe_head: uint +} + +export interface XpackUsageEqlFeaturesSequences { + sequence_queries_three: uint + sequence_queries_four: uint + sequence_queries_two: uint + sequence_until: uint + sequence_queries_five_or_more: uint + sequence_maxspan: uint +} + +export interface XpackUsageFeatureToggle { + enabled: boolean +} + +export interface XpackUsageFlattened extends XpackUsageBase { + field_count: integer +} + +export interface XpackUsageHealthStatistics extends XpackUsageBase { + invocations: XpackUsageInvocations +} + +export interface XpackUsageIlm { + policy_count: integer + policy_stats: XpackUsageIlmPolicyStatistics[] +} + +export interface XpackUsageIlmPolicyStatistics { + indices_managed: integer + phases: XpackUsagePhases 
+} + +export interface XpackUsageInvocations { + total: long +} + +export interface XpackUsageIpFilter { + http: boolean + transport: boolean +} + +export interface XpackUsageJobUsage { + count: integer + created_by: Record + detectors: MlJobStatistics + forecasts: XpackUsageMlJobForecasts + model_size: MlJobStatistics +} + +export interface XpackUsageMachineLearning extends XpackUsageBase { + datafeeds: Record + /** Job usage statistics. The `_all` entry is always present and gathers statistics for all jobs. */ + jobs: Record + node_count: integer + data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs + inference: XpackUsageMlInference +} + +export interface XpackUsageMlCounter { + count: long +} + +export interface XpackUsageMlDataFrameAnalyticsJobs { + memory_usage?: XpackUsageMlDataFrameAnalyticsJobsMemory + _all: XpackUsageMlDataFrameAnalyticsJobsCount + analysis_counts?: XpackUsageMlDataFrameAnalyticsJobsAnalysis + stopped?: XpackUsageMlDataFrameAnalyticsJobsCount +} + +export interface XpackUsageMlDataFrameAnalyticsJobsAnalysis { + classification?: integer + outlier_detection?: integer + regression?: integer +} + +export interface XpackUsageMlDataFrameAnalyticsJobsCount { + count: long +} + +export interface XpackUsageMlDataFrameAnalyticsJobsMemory { + peak_usage_bytes: MlJobStatistics +} + +export interface XpackUsageMlInference { + ingest_processors: Record + trained_models: XpackUsageMlInferenceTrainedModels + deployments?: XpackUsageMlInferenceDeployments +} + +export interface XpackUsageMlInferenceDeployments { + count: integer + inference_counts: MlJobStatistics + model_sizes_bytes: MlJobStatistics + time_ms: XpackUsageMlInferenceDeploymentsTimeMs +} + +export interface XpackUsageMlInferenceDeploymentsTimeMs { + avg: double +} + +export interface XpackUsageMlInferenceIngestProcessor { + num_docs_processed: XpackUsageMlInferenceIngestProcessorCount + pipelines: XpackUsageMlCounter + num_failures: XpackUsageMlInferenceIngestProcessorCount + time_ms: XpackUsageMlInferenceIngestProcessorCount +} + +export interface XpackUsageMlInferenceIngestProcessorCount { + max: long + sum: long + min: long +} + +export interface XpackUsageMlInferenceTrainedModels { + estimated_operations?: MlJobStatistics + estimated_heap_memory_usage_bytes?: MlJobStatistics + count?: XpackUsageMlInferenceTrainedModelsCount + _all: XpackUsageMlCounter + model_size_bytes?: MlJobStatistics +} + +export interface XpackUsageMlInferenceTrainedModelsCount { + total: long + prepackaged: long + other: long + pass_through?: long + regression?: long + classification?: long + ner?: long + text_embedding?: long +} + +export interface XpackUsageMlJobForecasts { + total: long + forecasted_jobs: long +} + +export interface XpackUsageMonitoring extends XpackUsageBase { + collection_enabled: boolean + enabled_exporters: Record +} + +export interface XpackUsagePhase { + actions: string[] + min_age: DurationValue +} + +export interface XpackUsagePhases { + cold?: XpackUsagePhase + delete?: XpackUsagePhase + frozen?: XpackUsagePhase + hot?: XpackUsagePhase + warm?: XpackUsagePhase +} + +export interface XpackUsageQuery { + count?: integer + failed?: integer + paging?: integer + total?: integer +} + +export interface XpackUsageRealm extends XpackUsageBase { + name?: string[] + order?: long[] + size?: long[] + cache?: XpackUsageRealmCache[] + has_authorization_realms?: boolean[] + has_default_username_pattern?: boolean[] + has_truststore?: boolean[] + is_authentication_delegated?: boolean[] +} + +export interface 
XpackUsageRealmCache { + size: long +} + +export interface XpackUsageRequest extends RequestBase { + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface XpackUsageResponse { + aggregate_metric: XpackUsageBase + analytics: XpackUsageAnalytics + archive: XpackUsageArchive + watcher: XpackUsageWatcher + ccr: XpackUsageCcr + data_frame?: XpackUsageBase + data_science?: XpackUsageBase + data_streams?: XpackUsageDataStreams + data_tiers: XpackUsageDataTiers + enrich?: XpackUsageBase + eql: XpackUsageEql + flattened?: XpackUsageFlattened + graph: XpackUsageBase + health_api?: XpackUsageHealthStatistics + ilm: XpackUsageIlm + logstash: XpackUsageBase + ml: XpackUsageMachineLearning + monitoring: XpackUsageMonitoring + rollup: XpackUsageBase + runtime_fields?: XpackUsageRuntimeFieldTypes + spatial: XpackUsageBase + searchable_snapshots: XpackUsageSearchableSnapshots + security: XpackUsageSecurity + slm: XpackUsageSlm + sql: XpackUsageSql + transform: XpackUsageBase + vectors?: XpackUsageVector + voting_only: XpackUsageBase +} + +export interface XpackUsageRoleMapping { + enabled: integer + size: integer +} + +export interface XpackUsageRuntimeFieldTypes extends XpackUsageBase { + field_types: XpackUsageRuntimeFieldsType[] +} + +export interface XpackUsageRuntimeFieldsType { + chars_max: long + chars_total: long + count: long + doc_max: long + doc_total: long + index_count: long + lang: string[] + lines_max: long + lines_total: long + name: Field + scriptless_count: long + shadowed_count: long + source_max: long + source_total: long +} + +export interface XpackUsageSearchableSnapshots extends XpackUsageBase { + indices_count: integer + full_copy_indices_count?: integer + shared_cache_indices_count?: integer +} + +export interface XpackUsageSecurity extends XpackUsageBase { + api_key_service: XpackUsageFeatureToggle + anonymous: XpackUsageFeatureToggle + audit: XpackUsageAudit + fips_140: XpackUsageFeatureToggle + ipfilter: XpackUsageIpFilter + realms: Record + role_mapping: Record + roles: XpackUsageSecurityRoles + ssl: XpackUsageSsl + system_key?: XpackUsageFeatureToggle + token_service: XpackUsageFeatureToggle + operator_privileges: XpackUsageBase +} + +export interface XpackUsageSecurityRoles { + native: XpackUsageSecurityRolesNative + dls: XpackUsageSecurityRolesDls + file: XpackUsageSecurityRolesFile +} + +export interface XpackUsageSecurityRolesDls { + bit_set_cache: XpackUsageSecurityRolesDlsBitSetCache +} + +export interface XpackUsageSecurityRolesDlsBitSetCache { + /** Number of entries in the cache. */ + count: integer + /** Human-readable amount of memory taken up by the cache. */ + memory?: ByteSize + /** Memory taken up by the cache in bytes. */ + memory_in_bytes: ulong + /** Total number of cache hits. */ + hits: long + /** Total number of cache misses. */ + misses: long + /** Total number of cache evictions. */ + evictions: long + /** Total combined time spent in cache for hits in milliseconds. 
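+ * @example
+ * These counters appear in the usage API response; a hedged sketch of
+ * reading them:
+ * ```ts
+ * const usage = await client.xpack.usage()
+ * console.log(usage.security.roles.dls.bit_set_cache.count)
+ * ```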
*/ + hits_time_in_millis: DurationValue + /** Total combined time spent in cache for misses in milliseconds. */ + misses_time_in_millis: DurationValue +} + +export interface XpackUsageSecurityRolesFile { + dls: boolean + fls: boolean + size: long +} + +export interface XpackUsageSecurityRolesNative { + dls: boolean + fls: boolean + size: long +} + +export interface XpackUsageSlm extends XpackUsageBase { + policy_count?: integer + policy_stats?: SlmStatistics +} + +export interface XpackUsageSql extends XpackUsageBase { + features: Record + queries: Record +} + +export interface XpackUsageSsl { + http: XpackUsageFeatureToggle + transport: XpackUsageFeatureToggle +} + +export interface XpackUsageVector extends XpackUsageBase { + dense_vector_dims_avg_count: integer + dense_vector_fields_count: integer + sparse_vector_fields_count?: integer +} + +export interface XpackUsageWatcher extends XpackUsageBase { + execution: XpackUsageWatcherActions + watch: XpackUsageWatcherWatch + count: XpackUsageCounter +} + +export interface XpackUsageWatcherActionTotals { + total: Duration + total_time_in_ms: DurationValue +} + +export interface XpackUsageWatcherActions { + actions: Record +} + +export interface XpackUsageWatcherWatch { + input: Record + condition?: Record + action?: Record + trigger: XpackUsageWatcherWatchTrigger +} + +export interface XpackUsageWatcherWatchTrigger { + schedule?: XpackUsageWatcherWatchTriggerSchedule + _all: XpackUsageCounter +} + +export interface XpackUsageWatcherWatchTriggerSchedule extends XpackUsageCounter { + cron: XpackUsageCounter + _all: XpackUsageCounter +} + +export interface SpecUtilsAdditionalProperties { +} + +export interface SpecUtilsAdditionalProperty { +} + +export interface SpecUtilsCommonQueryParameters { + /** When set to `true` Elasticsearch will include the full stack trace of errors + * when they occur. */ + error_trace?: boolean + /** Comma-separated list of filters in dot notation which reduce the response + * returned by Elasticsearch. */ + filter_path?: string | string[] + /** When set to `true` will return statistics in a format suitable for humans. + * For example `"exists_time": "1h"` for humans and + * `"exists_time_in_millis": 3600000` for computers. When disabled, the human-readable + * values will be omitted. This makes sense for responses being consumed + * only by machines. */ + human?: boolean + /** If set to `true` the returned JSON will be "pretty-formatted". Use + * this option for debugging only. */ + pretty?: boolean +} + +export interface SpecUtilsOverloadOf { +} + +export interface SpecUtilsCommonCatQueryParameters { + /** Specifies the format to return the columnar data in; it can be set to + * `text`, `json`, `cbor`, `yaml`, or `smile`. */ + format?: string + /** When set to `true` will output available columns. This option + * can't be combined with any other query string option. */ + help?: boolean + /** When set to `true` will enable verbose output. */ + v?: boolean + /** Sets the units for columns that contain a byte-size value. + * Note that byte-size value units work in terms of powers of 1024. For instance `1kb` means 1024 bytes, not 1000 bytes. + * If omitted, byte-size values are rendered with a suffix such as `kb`, `mb`, or `gb`, chosen such that the numeric value of the column is as small as possible whilst still being at least `1.0`. + * If given, byte-size values are rendered as an integer with no suffix, representing the value of the column in the chosen unit. 
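+ * For example, with `bytes=kb` a column value of 1536 bytes is rendered as `1`, not `1.5`.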
+ * Values that are not an exact multiple of the chosen unit are rounded down. */ + bytes?: Bytes + /** Sets the units for columns that contain a time duration. + * If omitted, time duration values are rendered with a suffix such as `ms`, `s`, `m` or `h`, chosen such that the numeric value of the column is as small as possible whilst still being at least `1.0`. + * If given, time duration values are rendered as an integer with no suffix. + * Values that are not an exact multiple of the chosen unit are rounded down. */ + time?: TimeUnit +} diff --git a/src/client.ts b/src/client.ts new file mode 100644 index 000000000..d8739e87d --- /dev/null +++ b/src/client.ts @@ -0,0 +1,522 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import process from 'node:process' +import { ConnectionOptions as TlsConnectionOptions } from 'node:tls' +import { URL } from 'node:url' +import buffer from 'node:buffer' +import os from 'node:os' +import { + Transport, + TransportOptions, + UndiciConnection, + WeightedConnectionPool, + CloudConnectionPool, + Serializer, + Diagnostic, + errors, + BaseConnectionPool +} from '@elastic/transport' +import { + HttpAgentOptions, + UndiciAgentOptions, + agentFn, + nodeFilterFn, + nodeSelectorFn, + generateRequestIdFn, + BasicAuth, + ApiKeyAuth, + BearerAuth, + Context +} from '@elastic/transport/lib/types' +import { RedactionOptions } from '@elastic/transport/lib/Transport' +import BaseConnection, { prepareHeaders, ConnectionOptions } from '@elastic/transport/lib/connection/BaseConnection' +import SniffingTransport from './sniffingTransport' +import Helpers from './helpers' +import API from './api' +import packageJson from '../package.json' +import transportPackageJson from '@elastic/transport/package.json' + +const kChild = Symbol('elasticsearchjs-child') +const kInitialOptions = Symbol('elasticsearchjs-initial-options') +export const kAcceptedParams = Symbol('elasticsearchjs-accepted-params') + +let clientVersion: string = packageJson.version +/* istanbul ignore next */ +if (clientVersion.includes('-')) { + // clean prerelease + clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' +} +let transportVersion: string = transportPackageJson.version // eslint-disable-line +/* istanbul ignore next */ +if (transportVersion.includes('-')) { + // clean prerelease + transportVersion = transportVersion.slice(0, transportVersion.indexOf('-')) + 'p' +} +const nodeVersion = process.versions.node + +const serverlessApiVersion = '2023-10-31' + +export interface NodeOptions { + /** @property url Elasticsearch node's location */ + url: URL + id?: string + /** @property agent Custom HTTP agent options */ + agent?: HttpAgentOptions | UndiciAgentOptions + /** @property ssl Overrides default TLS connection settings */ + ssl?: TlsConnectionOptions + /** @property headers Custom HTTP headers that should be sent with each request */ + headers?: Record + /** @property roles Common Elasticsearch roles that can be assigned to this node. Can be helpful when writing custom nodeFilter or nodeSelector functions. */ + roles?: { + master: boolean + data: boolean + ingest: boolean + ml: boolean + } +} + +export interface ClientOptions { + /** @property node Elasticsearch node settings, if there is only one node. Required if `nodes` or `cloud` is not set. */ + node?: string | string[] | NodeOptions | NodeOptions[] + /** @property nodes Elasticsearch node settings, if there are multiple nodes. Required if `node` or `cloud` is not set. 
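+ * @example
+ * A minimal multi-node sketch (the URLs and credentials are placeholders):
+ * ```ts
+ * const client = new Client({
+ *   nodes: ['https://node1:9200', 'https://node2:9200'],
+ *   auth: { username: 'elastic', password: 'changeme' }
+ * })
+ * ```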
*/ + nodes?: string | string[] | NodeOptions | NodeOptions[] + /** @property Connection HTTP connection class to use + * @defaultValue `UndiciConnection` */ + Connection?: typeof BaseConnection + /** @property ConnectionPool HTTP connection pool class to use + * @defaultValue `CloudConnectionPool`, if connecting to Elastic Cloud, otherwise `WeightedConnectionPool` */ + ConnectionPool?: typeof BaseConnectionPool + /** @property Transport Elastic transport class to use + * @defaultValue `Transport` */ + Transport?: typeof Transport + /** @property Serializer Serialization class to use + * @defaultValue `Serializer` */ + Serializer?: typeof Serializer + /** @property maxRetries Max number of retries for each request + * @defaultValue 3 */ + maxRetries?: number + /** @property requestTimeout Max request timeout in milliseconds for each request + * @defaultValue No timeout + * @remarks Read [the Elasticsearch docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html#_http_client_configuration) about HTTP client configuration for details. */ + requestTimeout?: number + /** @property pingTimeout Max number of milliseconds a `ClusterConnectionPool` will wait when pinging nodes before marking them dead + * @defaultValue 3000 */ + pingTimeout?: number + /** @property sniffInterval Perform a sniff operation every `n` milliseconds + * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more. + * @defaultValue false */ + sniffInterval?: number | boolean + /** @property sniffOnStart Perform a sniff once the client is started + * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more. + * @defaultValue false */ + sniffOnStart?: boolean + /** @property sniffEndpoint Endpoint to ping during a sniff + * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more. + * @defaultValue "_nodes/_all/http" */ + sniffEndpoint?: string + /** @property sniffOnConnectionFault Perform a sniff on connection fault + * @remarks Sniffing might not be the best solution for you. Read https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how to learn more. + * @defaultValue false */ + sniffOnConnectionFault?: boolean + /** @property resurrectStrategy Strategy for resurrecting dead nodes when using `ClusterConnectionPool`. 'ping' will issue a test request to a node and resurrect it if it responds. 'optimistic' marks a node as alive without testing it. 'none' will never attempt to revive a dead connection. + * @defaultValue 'ping' */ + resurrectStrategy?: 'ping' | 'optimistic' | 'none' + /** @property compression Enables gzip request body compression + * @defaultValue `true` if connecting to Elastic Cloud, otherwise `false`. 
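+ * @example
+ * Opting in explicitly, as a sketch:
+ * ```ts
+ * const client = new Client({ node: 'https://localhost:9200', compression: true })
+ * ```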
*/ + compression?: boolean + /** @property tls [TLS configuration](https://nodejs.org/api/tls.html) + * @defaultValue null */ + tls?: TlsConnectionOptions + /** @property agent Custom HTTP agent options + * @defaultValue null */ + agent?: HttpAgentOptions | UndiciAgentOptions | agentFn | false + /** @property nodeFilter A custom function used by the connection pool to determine which nodes are qualified to receive a request + * @defaultValue A function that uses the Connection `roles` property to avoid master-only nodes */ + nodeFilter?: nodeFilterFn + /** @property nodeSelector A custom function used by the connection pool to determine which node should receive the next request + * @defaultValue A "round robin" function that loops sequentially through each node in the pool. */ + nodeSelector?: nodeSelectorFn + /** @property headers Custom HTTP headers that should be sent with each request + * @defaultValue An object with a custom `user-agent` header */ + headers?: Record + /** @property opaqueIdPrefix A string prefix to apply to every generated X-Opaque-Id header + * @defaultValue null */ + opaqueIdPrefix?: string + /** @property generateRequestId A custom function for generating unique IDs for each request, to make it easier to associate each API request with a single response + * @defaultValue A function that increments a number counter starting from 1 */ + generateRequestId?: generateRequestIdFn + /** @property name A name for this client + * @defaultValue 'elasticsearch-js' */ + name?: string | symbol + /** @property auth Authentication options for this Elasticsearch cluster + * @defaultValue null */ + auth?: BasicAuth | ApiKeyAuth | BearerAuth + /** @property context A custom object attached to each request that can be used to pass data to client events + * @defaultValue null */ + context?: Context + /** @property proxy A proxy URL that, when provided, the client will automatically send all requests through + * @defaultValue null */ + proxy?: string | URL + /** @property enableMetaHeader If true, adds a header named `x-elastic-client-meta`, containing a small amount of high-level telemetry data, such as the client and platform version + * @defaultValue true */ + enableMetaHeader?: boolean + /** @property cloud Custom configuration for connecting to Elastic Cloud, in lieu of a `node` or `nodes` configuration + * @remarks Read https://www.elastic.co/docs/reference/elasticsearch/clients/javascript/connecting#client-usage for more details + * @defaultValue null */ + cloud?: { + id: string + } + /** @property disablePrototypePoisoningProtection Disables safe JSON parsing that protects against prototype poisoning attacks; disabled by default, as it can introduce a performance penalty + * @defaultValue true */ + disablePrototypePoisoningProtection?: boolean | 'proto' | 'constructor' + /** @property caFingerprint If configured, verifies that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint; only accepts SHA256 digest fingerprints + * @defaultValue null */ + caFingerprint?: string + /** @property maxResponseSize When configured, verifies that the uncompressed response size is lower than the configured number. If it's higher, it will abort the request. It cannot be higher than `buffer.constants.MAX_STRING_LENGTH` + * @defaultValue null */ + maxResponseSize?: number + /** @property maxCompressedResponseSize When configured, verifies that the compressed response size is lower than the configured number. 
If it's higher, it will abort the request. It cannot be higher than `buffer.constants.MAX_LENGTH` + * @defaultValue null */ + maxCompressedResponseSize?: number + /** @property redaction Options for how to redact potentially sensitive data from metadata attached to `Error` objects + * @remarks Read https://www.elastic.co/docs/reference/elasticsearch/clients/javascript/advanced-config#redaction for more details + * @defaultValue Configuration that will replace known sources of sensitive data */ + redaction?: RedactionOptions + /** @property serverMode Setting to "serverless" will change some default behavior, like enabling compression and disabling features that assume the possibility of multiple Elasticsearch nodes. + * @defaultValue "stack", which sets defaults for a traditional (non-serverless) Elasticsearch instance. */ + serverMode?: 'stack' | 'serverless' +} + +export default class Client extends API { + diagnostic: Diagnostic + name: string | symbol + connectionPool: BaseConnectionPool + transport: SniffingTransport + serializer: Serializer + helpers: Helpers + + constructor (opts: ClientOptions) { + super() + + // @ts-expect-error kChild symbol is for internal use only + if ((opts.cloud != null || opts.serverMode === 'serverless') && opts[kChild] === undefined) { + if (opts.cloud != null) { + const { id } = opts.cloud + if (typeof id !== 'string') { + throw new errors.ConfigurationError('Cloud ID must be a string.') + } + + const parts = id.split(':') + if (parts.length !== 2 || parts[1] === '') { + throw new errors.ConfigurationError( + 'Cloud ID must be in the format "name:base64string".' + ) + } + + // the cloud id is `cluster-name:base64encodedurl` + // the url is a string divided by two '$', the first is the cloud url + // the second the elasticsearch instance, the third the kibana instance + + let cloudUrls + try { + cloudUrls = Buffer.from(parts[1], 'base64').toString().split('$') + } catch (err) { + throw new errors.ConfigurationError('Cloud ID base64 decoding failed.') + } + if (cloudUrls.length < 2 || cloudUrls[0] === '' || cloudUrls[1] === '') { + throw new errors.ConfigurationError( + 'Cloud ID base64 must contain at least two "$" separated parts: "$[$]".' + ) + } + + opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}` + } + + // Cloud has better performance with compression enabled + // see https://github.com/elastic/elasticsearch-py/pull/704. + // So unless the user specifies otherwise, we enable compression. + if (opts.compression == null) opts.compression = true + if (opts.tls == null || + (opts.tls != null && opts.tls.secureProtocol == null)) { + opts.tls = opts.tls ?? {} + opts.tls.secureProtocol = 'TLSv1_2_method' + } + } + + if (opts.node == null && opts.nodes == null) { + throw new errors.ConfigurationError('Missing node(s) option') + } + + // @ts-expect-error kChild symbol is for internal use only + if (opts[kChild] === undefined) { + const checkAuth = getAuth(opts.node ?? opts.nodes) + if ((checkAuth != null) && checkAuth.username !== '' && checkAuth.password !== '') { + opts.auth = Object.assign({}, opts.auth, { username: checkAuth.username, password: checkAuth.password }) + } + } + + const headers: Record = Object.assign({}, { + 'user-agent': `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${nodeVersion}; Transport ${transportVersion})` + }, opts.headers ?? 
{}) + if (opts.serverMode === 'serverless') headers['elastic-api-version'] = serverlessApiVersion + + const redaction = Object.assign({}, { type: 'replace', additionalKeys: [] }, opts.redaction ?? {}) + + const options: Required = Object.assign({}, { + Connection: UndiciConnection, + Transport: opts.serverMode === 'serverless' ? Transport : SniffingTransport, + Serializer, + ConnectionPool: (opts.cloud != null || opts.serverMode === 'serverless') ? CloudConnectionPool : WeightedConnectionPool, + maxRetries: 3, + pingTimeout: 3000, + sniffInterval: false, + sniffOnStart: false, + sniffEndpoint: '_nodes/_all/http', + sniffOnConnectionFault: false, + resurrectStrategy: 'ping', + compression: false, + tls: null, + caFingerprint: null, + agent: null, + nodeFilter: null, + generateRequestId: null, + name: 'elasticsearch-js', + auth: null, + opaqueIdPrefix: null, + context: null, + proxy: null, + enableMetaHeader: true, + maxResponseSize: null, + maxCompressedResponseSize: null, + serverMode: 'stack' + }, opts, { headers, redaction }) + + if (options.caFingerprint != null && isHttpConnection(opts.node ?? opts.nodes)) { + throw new errors.ConfigurationError('You can\'t configure the caFingerprint with a http connection') + } + + if (options.maxResponseSize != null && options.maxResponseSize > buffer.constants.MAX_STRING_LENGTH) { + throw new errors.ConfigurationError(`The maxResponseSize cannot be bigger than ${buffer.constants.MAX_STRING_LENGTH}`) + } + + if (options.maxCompressedResponseSize != null && options.maxCompressedResponseSize > buffer.constants.MAX_LENGTH) { + throw new errors.ConfigurationError(`The maxCompressedResponseSize cannot be bigger than ${buffer.constants.MAX_LENGTH}`) + } + + if (options.enableMetaHeader) { + let clientMeta = `es=${clientVersion},js=${nodeVersion},t=${transportVersion}` + if (options.Connection === UndiciConnection) { + clientMeta += `,un=${nodeVersion}` + } else { + // assumes HttpConnection + clientMeta += `,hc=${nodeVersion}` + } + options.headers['x-elastic-client-meta'] = clientMeta + } + + this.name = options.name + // @ts-expect-error kInitialOptions symbol is for internal use only + this[kInitialOptions] = options + + // @ts-expect-error kChild symbol is for internal use only + if (opts[kChild] !== undefined) { + // @ts-expect-error kChild symbol is for internal use only + this.serializer = opts[kChild].serializer + // @ts-expect-error kChild symbol is for internal use only + this.connectionPool = opts[kChild].connectionPool + // @ts-expect-error kChild symbol is for internal use only + this.diagnostic = opts[kChild].diagnostic + } else { + this.diagnostic = new Diagnostic() + + let serializerOptions + if (opts.disablePrototypePoisoningProtection != null) { + if (typeof opts.disablePrototypePoisoningProtection === 'boolean') { + serializerOptions = { + enablePrototypePoisoningProtection: !opts.disablePrototypePoisoningProtection + } + } else { + serializerOptions = { + enablePrototypePoisoningProtection: opts.disablePrototypePoisoningProtection + } + } + } + this.serializer = new options.Serializer(serializerOptions) + + this.connectionPool = new options.ConnectionPool({ + pingTimeout: options.pingTimeout, + resurrectStrategy: options.resurrectStrategy, + tls: options.tls, + agent: options.agent, + proxy: options.proxy, + Connection: options.Connection, + auth: options.auth, + diagnostic: this.diagnostic, + caFingerprint: options.caFingerprint + }) + + // ensure default connection values are inherited when creating new connections + // see 
https://github.com/elastic/elasticsearch-js/issues/1791 + let nodes = options.node ?? options.nodes + + // serverless only supports one node, so pick the first one + if (options.serverMode === 'serverless' && Array.isArray(nodes)) { + nodes = nodes[0] + } + + let nodeOptions: Array = Array.isArray(nodes) ? nodes : [nodes] + type ConnectionDefaults = Record + nodeOptions = nodeOptions.map(opt => { + const { tls, headers, auth, requestTimeout: timeout, agent, proxy, caFingerprint } = options + let defaults: ConnectionDefaults = { tls, headers, auth, timeout, agent, proxy, caFingerprint } + + // strip undefined values from defaults + defaults = Object.keys(defaults).reduce((acc: ConnectionDefaults, key) => { + const val = defaults[key] + if (val !== undefined) acc[key] = val + return acc + }, {}) + + let newOpts + if (typeof opt === 'string') { + newOpts = { + url: new URL(opt) + } + } else { + newOpts = opt + } + + return { ...defaults, ...newOpts } + }) + this.connectionPool.addConnection(nodeOptions) + } + + let transportOptions: TransportOptions = { + diagnostic: this.diagnostic, + connectionPool: this.connectionPool, + serializer: this.serializer, + maxRetries: options.maxRetries, + requestTimeout: options.requestTimeout, + compression: options.compression, + headers: options.headers, + generateRequestId: options.generateRequestId, + name: options.name, + opaqueIdPrefix: options.opaqueIdPrefix, + context: options.context, + productCheck: 'Elasticsearch', + maxResponseSize: options.maxResponseSize, + maxCompressedResponseSize: options.maxCompressedResponseSize, + redaction: options.redaction, + /* eslint-disable-next-line @typescript-eslint/prefer-ts-expect-error */ + // @ts-ignore enableMetaHeader will be available in transport v9.1.1 + enableMetaHeader: options.enableMetaHeader + } + if (options.serverMode !== 'serverless') { + transportOptions = Object.assign({}, transportOptions, { + sniffInterval: options.sniffInterval, + sniffOnStart: options.sniffOnStart, + sniffOnConnectionFault: options.sniffOnConnectionFault, + sniffEndpoint: options.sniffEndpoint, + nodeFilter: options.nodeFilter, + nodeSelector: options.nodeSelector, + vendoredHeaders: { + jsonContentType: 'application/vnd.elasticsearch+json; compatible-with=9', + ndjsonContentType: 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', + accept: 'application/vnd.elasticsearch+json; compatible-with=9,text/plain' + } + }) + } + + this.transport = new options.Transport(transportOptions) + + this.helpers = new Helpers({ + client: this, + metaHeader: options.enableMetaHeader + ? 
+
+    this.helpers = new Helpers({
+      client: this,
+      metaHeader: options.enableMetaHeader
+        ? `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}`
+        : null,
+      maxRetries: options.maxRetries
+    })
+  }
+
+  /**
+   * Creates a child client instance that shares its connection pool with the parent client
+   * @see {@link https://www.elastic.co/docs/reference/elasticsearch/clients/javascript/child}
+   */
+  child (opts: ClientOptions): Client {
+    // Merge the new options with the initial ones
+    // @ts-expect-error kChild symbol is for internal use only
+    const options: ClientOptions = Object.assign({}, this[kInitialOptions], opts)
+    // Pass to the child client the parent instances that cannot be overridden
+    // @ts-expect-error kInitialOptions symbol is for internal use only
+    options[kChild] = {
+      connectionPool: this.connectionPool,
+      serializer: this.serializer,
+      diagnostic: this.diagnostic,
+      initialOptions: options
+    }
+
+    /* istanbul ignore else */
+    if (options.auth !== undefined) {
+      options.headers = prepareHeaders(options.headers, options.auth)
+    }
+
+    return new Client(options)
+  }
+
+  /**
+   * Closes all connections in the connection pool. Connections shared with any parent or child instances will also be closed.
+   */
+  async close (): Promise<void> {
+    return await this.connectionPool.empty()
+  }
+}
+
+function isHttpConnection (node?: string | string[] | NodeOptions | NodeOptions[]): boolean {
+  if (Array.isArray(node)) {
+    return node.some((n) => (typeof n === 'string' ? new URL(n).protocol : n.url.protocol) === 'http:')
+  } else {
+    if (node == null) return false
+    return (typeof node === 'string' ? new URL(node).protocol : node.url.protocol) === 'http:'
+  }
+}
+
+function getAuth (node?: string | string[] | NodeOptions | NodeOptions[]): { username: string, password: string } | null {
+  if (Array.isArray(node)) {
+    for (const url of node) {
+      const auth = getUsernameAndPassword(url)
+      if (auth != null && auth.username !== '' && auth.password !== '') {
+        return auth
+      }
+    }
+
+    return null
+  } else {
+    const auth = getUsernameAndPassword(node)
+    if (auth != null && auth.username !== '' && auth.password !== '') {
+      return auth
+    }
+
+    return null
+  }
+
+  function getUsernameAndPassword (node?: string | NodeOptions): { username: string, password: string } | null {
+    /* istanbul ignore else */
+    if (typeof node === 'string') {
+      const { username, password } = new URL(node)
+      return {
+        username: decodeURIComponent(username),
+        password: decodeURIComponent(password)
+      }
+    } else if (node != null && node.url instanceof URL) {
+      return {
+        username: decodeURIComponent(node.url.username),
+        password: decodeURIComponent(node.url.password)
+      }
+    } else {
+      return null
+    }
+  }
+}
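// Sketch of the child() contract defined above (illustrative; the header name
// and index are hypothetical): the child shares the parent's connection pool,
// serializer and diagnostic, so closing either client empties the shared pool.
import { Client } from '@elastic/elasticsearch'

const parent = new Client({ node: 'http://localhost:9200/' })
const scoped = parent.child({ headers: { 'x-request-source': 'batch-job' } })

await scoped.search({ index: 'my-index' }) // served by the parent's pool
await parent.close()                       // scoped can no longer make requests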
diff --git a/src/helpers.ts b/src/helpers.ts
new file mode 100644
index 000000000..e8a64545a
--- /dev/null
+++ b/src/helpers.ts
@@ -0,0 +1,1050 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/* eslint-disable @typescript-eslint/naming-convention */
+/* eslint-disable @typescript-eslint/promise-function-async */
+/* eslint-disable @typescript-eslint/no-unnecessary-type-assertion */
+
+import assert from 'node:assert'
+import * as timersPromises from 'node:timers/promises'
+import { Readable } from 'node:stream'
+import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport'
+import { Table, TypeMap, tableFromIPC, AsyncRecordBatchStreamReader } from 'apache-arrow/Arrow.node'
+import Client from './client'
+import * as T from './api/types'
+import { Id } from './api/types'
+
+export interface HelpersOptions {
+  client: Client
+  metaHeader: string | null
+  maxRetries: number
+}
+
+export interface ScrollSearchOptions extends TransportRequestOptions {
+  wait?: number
+}
+
+export interface ScrollSearchResponse<TDocument = unknown> extends TransportResult<T.SearchResponse<TDocument>, unknown> {
+  clear: () => Promise<void>
+  documents: TDocument[]
+}
+
+export interface MsearchHelperOptions extends T.MsearchRequest {
+  operations?: number
+  flushInterval?: number
+  concurrency?: number
+  retries?: number
+  wait?: number
+}
+
+export interface MsearchHelper extends Promise<void> {
+  stop: (error?: Error | null) => void
+  search: <TDocument = unknown>(header: T.MsearchMultisearchHeader, body: T.SearchSearchRequestBody) => Promise<MsearchHelperResponse<TDocument>>
+}
+
+export interface MsearchHelperResponse<TDocument = unknown> {
+  body: T.SearchResponse<TDocument>
+  documents: TDocument[]
+  status: number
+  responses: T.MsearchResponse<TDocument>
+}
+
+export interface BulkStats {
+  total: number
+  failed: number
+  retry: number
+  successful: number
+  noop: number
+  time: number
+  bytes: number
+  aborted: boolean
+}
+
+interface IndexActionOperation {
+  index: T.BulkIndexOperation
+}
+
+interface CreateActionOperation {
+  create: T.BulkCreateOperation
+}
+
+interface UpdateActionOperation {
+  update: T.BulkUpdateOperation
+}
+
+interface DeleteAction {
+  delete: T.BulkDeleteOperation
+}
+
+type CreateAction = CreateActionOperation | [CreateActionOperation, unknown]
+type IndexAction = IndexActionOperation | [IndexActionOperation, unknown]
+type UpdateAction = [UpdateActionOperation, T.BulkUpdateAction]
+type Action = IndexAction | CreateAction | UpdateAction | DeleteAction
+
+export interface OnDropDocument<TDocument = unknown> {
+  status: number
+  operation: Action
+  error: T.ErrorCause | null
+  document: TDocument
+  retried: boolean
+}
+
+type BulkResponseItem = Partial<Record<T.BulkOperationType, T.BulkResponseItem>>
+
+export interface OnSuccessDocument<TDocument = unknown> {
+  result: BulkResponseItem
+  document?: TDocument
+}
+
+interface ZippedResult<TDocument = unknown> {
+  result: BulkResponseItem
+  raw: {
+    action: string
+    document?: string
+  }
+  // this is a function so that deserialization is only done when needed
+  // to avoid a performance hit
+  document?: () => TDocument
+}
+
+export interface BulkHelperOptions<TDocument = unknown> extends T.BulkRequest {
+  datasource: TDocument[] | Buffer | Readable | AsyncIterator<TDocument>
+  onDocument: (doc: TDocument) => Action
+  flushBytes?: number
+  flushInterval?: number
+  concurrency?: number
+  retries?: number
+  wait?: number
+  onDrop?: (doc: OnDropDocument<TDocument>) => void
+  onSuccess?: (doc: OnSuccessDocument<TDocument>) => void
+  refreshOnCompletion?: boolean | string
+}
+
+export interface BulkHelper<T = BulkStats> extends Promise<T> {
+  abort: () => BulkHelper<T>
+  readonly stats: BulkStats
+}
+
+export interface EsqlColumn {
+  name: string
+  type: string
+}
+
+export interface EsqlHelper {
+  toRecords: <TDocument>() => Promise<EsqlToRecords<TDocument>>
+  toArrowTable: () => Promise<Table<TypeMap>>
+  toArrowReader: () => Promise<AsyncRecordBatchStreamReader>
+}
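// How the generics above fit together in practice (a sketch; `MyDoc`, the
// index name and the document are hypothetical, the types are the ones
// declared in this file):
interface MyDoc { id: string, title: string }

const bulkOptions: BulkHelperOptions<MyDoc> = {
  datasource: [{ id: '1', title: 'hello world' }],
  onDocument (doc) {
    // route every document to an index operation
    return { index: { _index: 'my-index', _id: doc.id } }
  },
  onDrop (failure: OnDropDocument<MyDoc>) {
    // called for documents that failed and exhausted their retries
    console.warn(failure.status, failure.error, failure.document.id)
  }
}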
+
+export interface EsqlToRecords<TDocument> {
+  columns: EsqlColumn[]
+  records: TDocument[]
+}
+
+const { ResponseError, ConfigurationError } = errors
+const sleep = timersPromises.setTimeout
+const pImmediate = timersPromises.setImmediate
+/* istanbul ignore next */
+const noop = (): void => {}
+const kClient = Symbol('elasticsearch-client')
+const kMetaHeader = Symbol('meta header')
+const kMaxRetries = Symbol('max retries')
+
+export default class Helpers {
+  [kClient]: Client
+  [kMetaHeader]: string | null
+  [kMaxRetries]: number
+  constructor (opts: HelpersOptions) {
+    this[kClient] = opts.client
+    this[kMetaHeader] = opts.metaHeader
+    this[kMaxRetries] = opts.maxRetries
+  }
+
+  /**
+   * Runs a search operation. The only difference between client.search and this utility
+   * is that we are only returning the hits to the user and not the full ES response.
+   * This helper automatically adds `filter_path=hits.hits._source` to the querystring,
+   * as it will only need the documents source.
+   * @param {object} params - The Elasticsearch's search parameters.
+   * @param {object} options - The client optional configuration for this request.
+   * @return {array} The documents that matched the request.
+   */
+  async search<TDocument = unknown> (params: T.SearchRequest, options: TransportRequestOptions = {}): Promise<Array<{ _id: Id } & TDocument>> {
+    appendFilterPath('hits.hits._id,hits.hits._source', params, true)
+    options.meta = true
+    const { body: result } = await this[kClient].search<TDocument>(params, options as TransportRequestOptionsWithMeta)
+    if (result.hits?.hits != null) {
+      return result.hits.hits.map(d => ({
+        // Starting with version 8.14.0, _id is optional, but in our case it's always present.
+        // See @es_quirk documentation in elasticsearch-specification/specification/_global/search/_types/hits.ts
+        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+        _id: d._id!,
+        ...(d._source as TDocument)
+      }))
+    }
+    return []
+  }
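// Usage sketch for the search helper above: it resolves to the matching
// documents (with _id merged in), not the full response envelope. The client,
// index and document shape are assumptions:
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })
const docs = await client.helpers.search<{ title: string }>({
  index: 'my-index',
  query: { match: { title: 'javascript' } }
})
for (const doc of docs) {
  console.log(doc._id, doc.title)
}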
+
+  /**
+   * Runs a scroll search operation. This function returns an async iterator, allowing
+   * the user to use a for await loop to get all the results of a given search.
+   * ```js
+   * for await (const result of client.helpers.scrollSearch({ params })) {
+   *   console.log(result)
+   * }
+   * ```
+   * Each result represents the entire body of a single scroll search request,
+   * if you just need to scroll the results, use scrollDocuments.
+   * This function handles automatically retries on 429 status code.
+   * @param {object} params - The Elasticsearch's search parameters.
+   * @param {object} options - The client optional configuration for this request.
+   * @return {iterator} the async iterator
+   */
+  async * scrollSearch<TDocument = unknown> (params: T.SearchRequest, options: ScrollSearchOptions = {}): AsyncIterable<ScrollSearchResponse<TDocument>> {
+    options.meta = true
+    if (this[kMetaHeader] !== null) {
+      options.headers = options.headers ?? {}
+      options.headers['x-elastic-client-meta'] = `${this[kMetaHeader] as string},h=s`
+    }
+    const wait = options.wait ?? 5000
+    const maxRetries = options.maxRetries ?? this[kMaxRetries]
+    if (Array.isArray(options.ignore)) {
+      options.ignore.push(429)
+    } else {
+      options.ignore = [429]
+    }
+    params.scroll = params.scroll ?? '1m'
+    appendFilterPath('_scroll_id', params, false)
+
+    let response: TransportResult<T.SearchResponse<TDocument>, unknown> | undefined
+    for (let i = 0; i <= maxRetries; i++) {
+      response = await this[kClient].search(params, options as TransportRequestOptionsWithMeta)
+      if (response.statusCode !== 429) break
+      await sleep(wait)
+    }
+    assert(response !== undefined, 'The response is undefined, please file a bug report')
+
+    const { redaction = { type: 'replace' } } = options
+    const errorOptions = { redaction }
+    if (response.statusCode === 429) {
+      throw new ResponseError(response, errorOptions)
+    }
+
+    let scroll_id = response.body._scroll_id
+    let stop = false
+    const clear = async (): Promise<void> => {
+      stop = true
+      await this[kClient].clearScroll(
+        { scroll_id },
+        { ignore: [400], ...options }
+      )
+    }
+
+    while (response.body.hits != null && response.body.hits.hits.length > 0) {
+      // scroll id is always present in the response, but it might
+      // change over time based on the number of shards
+      scroll_id = response.body._scroll_id
+      // @ts-expect-error
+      response.clear = clear
+      addDocumentsGetter(response)
+
+      // @ts-expect-error
+      yield response
+
+      if (stop) {
+        break
+      }
+
+      for (let i = 0; i <= maxRetries; i++) {
+        const r = await this[kClient].scroll({
+          scroll: params.scroll,
+          rest_total_hits_as_int: params.rest_total_hits_as_int,
+          scroll_id
+        }, options as TransportRequestOptionsWithMeta)
+        response = r as TransportResult<T.SearchResponse<TDocument>, unknown>
+        assert(response !== undefined, 'The response is undefined, please file a bug report')
+        if (response.statusCode !== 429) break
+        await sleep(wait)
+      }
+      if (response.statusCode === 429) {
+        throw new ResponseError(response, errorOptions)
+      }
+    }
+
+    if (!stop) {
+      await clear()
+    }
+  }
+
+  /**
+   * Runs a scroll search operation. This function returns an async iterator, allowing
+   * the user to use a for await loop to get all the documents of a given search.
+   * ```js
+   * for await (const document of client.helpers.scrollDocuments({ params })) {
+   *   console.log(document)
+   * }
+   * ```
+   * Each document is what you will find by running a scrollSearch and iterating on the hits array.
+   * This helper automatically adds `filter_path=hits.hits._source` to the querystring,
+   * as it will only need the documents source.
+   * @param {object} params - The Elasticsearch's search parameters.
+   * @param {object} options - The client optional configuration for this request.
+   * @return {iterator} the async iterator
+   */
+  async * scrollDocuments<TDocument = unknown> (params: T.SearchRequest, options: ScrollSearchOptions = {}): AsyncIterable<TDocument> {
+    appendFilterPath('hits.hits._source', params, true)
+    for await (const { documents } of this.scrollSearch<TDocument>(params, options)) {
+      for (const document of documents) {
+        yield document
+      }
+    }
+  }
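// Sketch for the scroll helpers above (same assumed client and index as the
// earlier sketch): scrollSearch yields whole responses, scrollDocuments just
// the document sources, one at a time.
for await (const doc of client.helpers.scrollDocuments<{ title: string }>({
  index: 'my-index',
  query: { match_all: {} }
})) {
  console.log(doc.title)
}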
+
+  /**
+   * Creates a msearch helper instance. Once you configure it, you can use the provided
+   * `search` method to add new searches in the queue.
+   * @param {object} options - The configuration of the msearch operations.
+   * @param {object} reqOptions - The client optional configuration for this request.
+   * @return {object} The possible operations to run.
+   */
+  msearch (options: MsearchHelperOptions = {}, reqOptions: TransportRequestOptions = {}): MsearchHelper {
+    const client = this[kClient]
+    const {
+      operations = 5,
+      concurrency = 5,
+      flushInterval = 500,
+      retries = this[kMaxRetries],
+      wait = 5000,
+      ...msearchOptions
+    } = options
+    reqOptions.meta = true
+
+    const { redaction = { type: 'replace' } } = reqOptions
+    const errorOptions = { redaction }
+
+    let stopReading = false
+    let stopError: Error | null = null
+    let timeoutRef: any = null
+    const operationsStream = new Readable({
+      objectMode: true,
+      read (size) {}
+    })
+
+    const p = iterate()
+    const helper: MsearchHelper = {
+      [Symbol.toStringTag]: 'Promise',
+      then (onFulfilled: any, onRejected?: any) {
+        return p.then(onFulfilled, onRejected)
+      },
+      catch (onRejected: any) {
+        return p.catch(onRejected)
+      },
+      finally (onFinally: any) {
+        return p.finally(onFinally)
+      },
+      stop (error = null) {
+        if (stopReading) return
+        stopReading = true
+        stopError = error
+        operationsStream.push(null)
+      },
+      // TODO: support abort a single search?
+      // NOTE: the validation checks are synchronous and the callback/promise will
+      // be resolved in the same tick. We might want to fix this in the future.
+      search<TDocument = unknown> (header: T.MsearchMultisearchHeader, body: T.SearchSearchRequestBody): Promise<MsearchHelperResponse<TDocument>> {
+        if (stopReading) {
+          const error = stopError === null
+            ? new ConfigurationError('The msearch processor has been stopped')
+            : stopError
+          return Promise.reject(error)
+        }
+
+        if (!(typeof header === 'object' && header !== null && !Array.isArray(header))) {
+          return Promise.reject(new ConfigurationError('The header should be an object'))
+        }
+
+        if (!(typeof body === 'object' && body !== null && !Array.isArray(body))) {
+          return Promise.reject(new ConfigurationError('The body should be an object'))
+        }
+
+        let onFulfilled: any = null
+        let onRejected: any = null
+        const promise = new Promise<MsearchHelperResponse<TDocument>>((resolve, reject) => {
+          onFulfilled = resolve
+          onRejected = reject
+        })
+        const callback = function callback (err: Error | null, result: T.MsearchResponse): void {
+          err !== null ? onRejected(err) : onFulfilled(result)
+        }
+
+        operationsStream.push([header, body, callback])
+        return promise
+      }
+    }
+
+    return helper
+
+    async function iterate (): Promise<void> {
+      const { semaphore, finish } = buildSemaphore()
+      const msearchBody: Array<T.MsearchMultisearchHeader | T.SearchSearchRequestBody> = []
+      const callbacks: any[] = []
+      let loadedOperations = 0
+      timeoutRef = setTimeout(onFlushTimeout, flushInterval) // eslint-disable-line
+
+      try {
+        for await (const operation of operationsStream) {
+          timeoutRef.refresh()
+          loadedOperations += 1
+          msearchBody.push(operation[0], operation[1])
+          callbacks.push(operation[2])
+          if (loadedOperations >= operations) {
+            const send = await semaphore()
+            send(msearchBody.slice(), callbacks.slice())
+            msearchBody.length = 0
+            callbacks.length = 0
+            loadedOperations = 0
+          }
+        }
+      } finally {
+        clearTimeout(timeoutRef)
+      }
+
+      // In some cases the previous http call has not finished,
+      // or we didn't reach the flush bytes threshold, so we force one last operation.
+      if (loadedOperations > 0) {
+        const send = await semaphore()
+        send(msearchBody, callbacks)
+      }
+
+      await finish()
+
+      if (stopError !== null) {
+        throw stopError
+      }
+
+      async function onFlushTimeout (): Promise<void> {
+        if (loadedOperations === 0) return
+        const msearchBodyCopy = msearchBody.slice()
+        const callbacksCopy = callbacks.slice()
+        msearchBody.length = 0
+        callbacks.length = 0
+        loadedOperations = 0
+        try {
+          const send = await semaphore()
+          send(msearchBodyCopy, callbacksCopy)
+        } catch (err) {
+          /* istanbul ignore next */
+          // @ts-expect-error
+          helper.stop(err)
+        }
+      }
+    }
+
+    // This function builds a semaphore using the concurrency
+    // options of the msearch helper. It is used inside the iterator
+    // to guarantee that no more than the number of operations
+    // allowed to run at the same time are executed.
+    // It returns a semaphore function which resolves in the next tick
+    // if we didn't reach the maximum concurrency yet, otherwise it returns
+    // a promise that resolves as soon as one of the running requests has finished.
+    // The semaphore function resolves a send function, which will be used
+    // to send the actual msearch request.
+    // It also returns a finish function, which returns a promise that is resolved
+    // when there are no longer requests running.
+    function buildSemaphore (): { semaphore: () => Promise<(msearchBody: Array<T.MsearchMultisearchHeader | T.SearchSearchRequestBody>, callbacks: any[]) => void>, finish: () => Promise<void> } {
+      let resolveSemaphore: ((value?: any) => void) | null = null
+      let resolveFinish: ((value?: any) => void) | null = null
+      let running = 0
+
+      return { semaphore, finish }
+
+      function finish (): Promise<void> {
+        return new Promise((resolve, reject) => {
+          if (running === 0) {
+            resolve()
+          } else {
+            resolveFinish = resolve
+          }
+        })
+      }
+
+      function semaphore (): Promise<(msearchBody: Array<T.MsearchMultisearchHeader | T.SearchSearchRequestBody>, callbacks: any[]) => void> {
+        if (running < concurrency) {
+          running += 1
+          return pImmediate(send)
+        } else {
+          return new Promise((resolve, reject) => {
+            resolveSemaphore = resolve
+          })
+        }
+      }
+
+      function send (msearchBody: Array<T.MsearchMultisearchHeader | T.SearchSearchRequestBody>, callbacks: any[]): void {
+        /* istanbul ignore if */
+        if (running > concurrency) {
+          throw new Error('Max concurrency reached')
+        }
+        msearchOperation(msearchBody, callbacks, () => {
+          running -= 1
+          if (resolveSemaphore !== null) {
+            running += 1
+            resolveSemaphore(send)
+            resolveSemaphore = null
+          } else if (resolveFinish != null && running === 0) {
+            resolveFinish()
+          }
+        })
+      }
+    }
+
+    function msearchOperation (msearchBody: Array<T.MsearchMultisearchHeader | T.SearchSearchRequestBody>, callbacks: any[], done: () => void): void {
+      let retryCount = retries
+
+      // Instead of going full on async-await, which would make the code easier to read,
+      // we have decided to use callback style instead.
+      // This is because every time we use async await, V8 will create multiple promises
+      // behind the scenes, making the code slightly slower.
+      tryMsearch(msearchBody, callbacks, retrySearch)
+      function retrySearch (msearchBody: Array<T.MsearchMultisearchHeader | T.SearchSearchRequestBody>, callbacks: any[]): void {
+        if (msearchBody.length > 0 && retryCount > 0) {
+          retryCount -= 1
+          setTimeout(tryMsearch, wait, msearchBody, callbacks, retrySearch)
+          return
+        }
+
+        done()
+      }
+
+      // This function never returns an error; if the msearch operation fails,
+      // the error is dispatched to all search executors.
+      function tryMsearch (msearchBody: Array<T.MsearchMultisearchHeader | T.SearchSearchRequestBody>, callbacks: any[], done: (msearchBody: Array<T.MsearchMultisearchHeader | T.SearchSearchRequestBody>, callbacks: any[]) => void): void {
+        client.msearch(Object.assign({}, msearchOptions, { body: msearchBody }), reqOptions as TransportRequestOptionsWithMeta)
+          .then(results => {
+            const retryBody = []
+            const retryCallbacks = []
+            const { responses } = results.body
+            for (let i = 0, len = responses.length; i < len; i++) {
+              const response = responses[i]
+              if (response.status === 429 && retryCount > 0) {
+                retryBody.push(msearchBody[i * 2])
+                retryBody.push(msearchBody[(i * 2) + 1])
+                retryCallbacks.push(callbacks[i])
+                continue
+              }
+              const result = { ...results, body: response }
+              // @ts-expect-error
+              addDocumentsGetter(result)
+              if (response.status != null && response.status >= 400) {
+                callbacks[i](new ResponseError(result, errorOptions), result)
+              } else {
+                callbacks[i](null, result)
+              }
+            }
+            done(retryBody, retryCallbacks)
+          })
+          .catch(err => {
+            for (const callback of callbacks) {
+              callback(err, null)
+            }
+            return done([], [])
+          })
+      }
+    }
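// Usage sketch for the msearch helper defined above (same assumed client and
// index): individual searches are queued and batched into _msearch calls.
const m = client.helpers.msearch({ operations: 10, flushInterval: 500 })

const { documents, status } = await m.search<{ title: string }>(
  { index: 'my-index' },
  { query: { match: { title: 'javascript' } } }
)
console.log(status, documents)

// stop accepting new searches; anything already queued is still flushed
m.stop()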
+
+  /**
+   * Creates a bulk helper instance. Once you configure it, you can pick which operation
+   * to execute with the given dataset: index, create, update, or delete.
+   * @param {object} options - The configuration of the bulk operation.
+   * @param {object} reqOptions - The client optional configuration for this request.
+   * @return {object} The possible operations to run with the datasource.
+   */
+  bulk<TDocument = unknown> (options: BulkHelperOptions<TDocument>, reqOptions: TransportRequestOptions = {}): BulkHelper<BulkStats> {
+    assert(!(reqOptions.asStream ?? false), 'bulk helper: the asStream request option is not supported')
+
+    const client = this[kClient]
+    const { serializer } = client
+    if (this[kMetaHeader] !== null) {
+      reqOptions.headers = reqOptions.headers ?? {}
+      reqOptions.headers['x-elastic-client-meta'] = `${this[kMetaHeader] as string},h=bp`
+    }
+    reqOptions.meta = true
+    const {
+      datasource,
+      onDocument,
+      flushBytes = 5000000,
+      flushInterval = 30000,
+      concurrency = 5,
+      retries = this[kMaxRetries],
+      wait = 5000,
+      onDrop = noop,
+      // onSuccess does not default to noop, to avoid the performance hit
+      // of deserializing every document in the bulk request
+      onSuccess,
+      refreshOnCompletion = false,
+      ...bulkOptions
+    } = options
+
+    if (datasource === undefined) {
+      // @ts-expect-error
+      return Promise.reject(new ConfigurationError('bulk helper: the datasource is required'))
+    }
+    if (!(Array.isArray(datasource) || Buffer.isBuffer(datasource) || isReadableStream(datasource) || isAsyncIterator(datasource))) {
+      // @ts-expect-error
+      return Promise.reject(new ConfigurationError('bulk helper: the datasource must be an array or a buffer or a readable stream or an async generator'))
+    }
+    if (onDocument === undefined) {
+      // @ts-expect-error
+      return Promise.reject(new ConfigurationError('bulk helper: the onDocument callback is required'))
+    }
+
+    let shouldAbort = false
+    let timeoutRef: any = null
+    const stats = {
+      total: 0,
+      failed: 0,
+      retry: 0,
+      successful: 0,
+      noop: 0,
+      time: 0,
+      bytes: 0,
+      aborted: false
+    }
+
+    const p = iterate()
+    const helper: BulkHelper<BulkStats> = {
+      [Symbol.toStringTag]: 'Promise',
+      then (onFulfilled: any, onRejected?: any) {
+        return p.then(onFulfilled, onRejected)
+      },
+      catch (onRejected: any) {
+        return p.catch(onRejected)
+      },
+      finally (onFinally: any) {
+        return p.finally(onFinally)
+      },
+      get stats () {
+        return stats
+      },
+      abort () {
+        clearTimeout(timeoutRef)
+        shouldAbort = true
+        stats.aborted = true
+        return this
+      }
+    }
+
+    return helper
+
+    /**
+     * Function that iterates over the given datasource and starts a bulk operation as soon
+     * as it reaches the configured bulk size. It's designed to use the Node.js asynchronous
+     * model at its maximum capacity, as it will collect the next body to send while there is
+     * a running http call. In this way, the CPU time will be used carefully.
+     * The objects will be serialized right away, to approximate the byte length of the body.
+     * It creates an array of strings instead of a ndjson string because the bulkOperation
+     * will navigate the body for matching failed operations with the original document.
+     */
+    async function iterate (): Promise<BulkStats> {
+      const { semaphore, finish } = buildSemaphore()
+      const startTime = Date.now()
+      const bulkBody: string[] = []
+      let actionBody = ''
+      let payloadBody = ''
+      let chunkBytes = 0
+      timeoutRef = setTimeout(onFlushTimeout, flushInterval) // eslint-disable-line
+
+      // @ts-expect-error datasource is an iterable
+      for await (const chunk of datasource) {
+        if (shouldAbort) break
+        timeoutRef.refresh()
+        const result = onDocument(chunk)
+        const [action, payload] = Array.isArray(result) ? result : [result, chunk]
+        const operation = Object.keys(action)[0]
+        if (operation === 'index' || operation === 'create') {
+          actionBody = serializer.serialize(action)
+          payloadBody = typeof payload === 'string'
+            ? payload
+            : serializer.serialize(payload)
+          chunkBytes += Buffer.byteLength(actionBody) + Buffer.byteLength(payloadBody)
+          bulkBody.push(actionBody, payloadBody)
+        } else if (operation === 'update') {
+          actionBody = serializer.serialize(action)
+          payloadBody = typeof chunk === 'string'
+            ? `{"doc":${chunk}}`
+            : serializer.serialize({ doc: chunk, ...payload })
+          chunkBytes += Buffer.byteLength(actionBody) + Buffer.byteLength(payloadBody)
+          bulkBody.push(actionBody, payloadBody)
+        } else if (operation === 'delete') {
+          actionBody = serializer.serialize(action)
+          chunkBytes += Buffer.byteLength(actionBody)
+          bulkBody.push(actionBody)
+        } else {
+          clearTimeout(timeoutRef)
+          throw new ConfigurationError(`Bulk helper invalid action: '${operation}'`)
+        }
+
+        if (chunkBytes >= flushBytes) {
+          stats.bytes += chunkBytes
+          const bulkBodyCopy = bulkBody.slice()
+          bulkBody.length = 0
+          chunkBytes = 0
+          const send = await semaphore()
+          send(bulkBodyCopy)
+        }
+      }
+
+      clearTimeout(timeoutRef)
+      // In some cases the previous http call has not finished,
+      // or we didn't reach the flush bytes threshold, so we force one last operation.
+      if (!shouldAbort && chunkBytes > 0) {
+        const send = await semaphore()
+        stats.bytes += chunkBytes
+        send(bulkBody)
+      }
+
+      await finish()
+
+      if (refreshOnCompletion !== false) {
+        await client.indices.refresh({
+          index: typeof refreshOnCompletion === 'string'
+            ? refreshOnCompletion
+            : '_all'
+        }, reqOptions)
+      }
+
+      stats.time = Date.now() - startTime
+      stats.total = stats.successful + stats.failed
+
+      return stats
+
+      async function onFlushTimeout (): Promise<void> {
+        if (chunkBytes === 0) return
+        stats.bytes += chunkBytes
+        const bulkBodyCopy = bulkBody.slice()
+        bulkBody.length = 0
+        chunkBytes = 0
+        try {
+          const send = await semaphore()
+          send(bulkBodyCopy)
+        } catch (err: any) {
+          /* istanbul ignore next */
+          helper.abort() // eslint-disable-line
+        }
+      }
+    }
+
+    // This function builds a semaphore using the concurrency
+    // options of the bulk helper. It is used inside the iterator
+    // to guarantee that no more than the number of operations
+    // allowed to run at the same time are executed.
+    // It returns a semaphore function which resolves in the next tick
+    // if we didn't reach the maximum concurrency yet, otherwise it returns
+    // a promise that resolves as soon as one of the running requests has finished.
+    // The semaphore function resolves a send function, which will be used
+    // to send the actual bulk request.
+    // It also returns a finish function, which returns a promise that is resolved
+    // when there are no longer requests running. It rejects with an error if one
+    // of the requests has failed for some reason.
+    function buildSemaphore (): { semaphore: () => Promise<(bulkBody: string[]) => void>, finish: () => Promise<void> } {
+      let resolveSemaphore: ((value?: any) => void) | null = null
+      let resolveFinish: ((value?: any) => void) | null = null
+      let rejectFinish: ((value?: any) => void) | null = null
+      let error: Error | null = null
+      let running = 0
+
+      return { semaphore, finish }
+
+      function finish (): Promise<void> {
+        return new Promise((resolve, reject) => {
+          if (running === 0) {
+            if (error !== null) {
+              reject(error)
+            } else {
+              resolve()
+            }
+          } else {
+            resolveFinish = resolve
+            rejectFinish = reject
+          }
+        })
+      }
+
+      function semaphore (): Promise<(bulkBody: string[]) => void> {
+        if (running < concurrency) {
+          running += 1
+          return pImmediate(send)
+        } else {
+          return new Promise((resolve, reject) => {
+            resolveSemaphore = resolve
+          })
+        }
+      }
+
+      function send (bulkBody: string[]): void {
+        /* istanbul ignore if */
+        if (running > concurrency) {
+          throw new Error('Max concurrency reached')
+        }
+        bulkOperation(bulkBody, err => {
+          running -= 1
+          if (err != null) {
+            shouldAbort = true
+            error = err
+          }
+          if (resolveSemaphore !== null) {
+            running += 1
+            resolveSemaphore(send)
+            resolveSemaphore = null
+          } else if (resolveFinish != null && rejectFinish != null && running === 0) {
+            if (error != null) {
+              rejectFinish(error)
+            } else {
+              resolveFinish()
+            }
+          }
+        })
+      }
+    }
+
+    function bulkOperation (bulkBody: string[], callback: (err?: Error | null) => void): void {
+      let retryCount = retries
+      let isRetrying = false
+
+      // Instead of going full on async-await, which would make the code easier to read,
+      // we have decided to use callback style instead.
+      // This is because every time we use async await, V8 will create multiple promises
+      // behind the scenes, making the code slightly slower.
+      tryBulk(bulkBody, retryDocuments)
+      function retryDocuments (err: Error | null, bulkBody: string[]): void {
+        if (err != null) return callback(err)
+        if (shouldAbort) return callback()
+
+        if (bulkBody.length > 0) {
+          if (retryCount > 0) {
+            isRetrying = true
+            retryCount -= 1
+            stats.retry += bulkBody.length
+            setTimeout(tryBulk, wait, bulkBody, retryDocuments)
+            return
+          }
+          for (let i = 0, len = bulkBody.length; i < len; i = i + 2) {
+            const operation = Object.keys(serializer.deserialize(bulkBody[i]))[0]
+            onDrop({
+              status: 429,
+              error: null,
+              operation: serializer.deserialize(bulkBody[i]),
+              // @ts-expect-error
+              document: operation !== 'delete'
+                ? serializer.deserialize(bulkBody[i + 1])
+                /* istanbul ignore next */
+                : null,
+              retried: isRetrying
+            })
+            stats.failed += 1
+          }
+        }
+        callback()
+      }
+
+      /**
+       * Zips bulk response items (the action's result) with the original document body.
+       * The raw string version of action and document lines are also included.
+       */
+      function zipBulkResults (responseItems: BulkResponseItem[], bulkBody: string[]): ZippedResult[] {
+        const zipped = []
+        let indexSlice = 0
+        for (let i = 0, len = responseItems.length; i < len; i++) {
+          const result = responseItems[i]
+          const operation = Object.keys(result)[0]
+          let zipResult
+
+          if (operation === 'delete') {
+            zipResult = {
+              result,
+              raw: { action: bulkBody[indexSlice] }
+            }
+            indexSlice += 1
+          } else {
+            const document = bulkBody[indexSlice + 1]
+            zipResult = {
+              result,
+              raw: { action: bulkBody[indexSlice], document },
+              // this is a function so that deserialization is only done when needed
+              // to avoid a performance hit
+              document: () => serializer.deserialize(document)
+            }
+            indexSlice += 2
+          }
+
+          zipped.push(zipResult as ZippedResult)
+        }
+
+        return zipped
+      }
+
+      function tryBulk (bulkBody: string[], callback: (err: Error | null, bulkBody: string[]) => void): void {
+        if (shouldAbort) return callback(null, [])
+        client.bulk(Object.assign({}, bulkOptions, { operations: bulkBody }), reqOptions as TransportRequestOptionsWithMeta)
+          .then(response => {
+            const result = response.body
+            const results = zipBulkResults(result.items, bulkBody)
+
+            if (!result.errors) {
+              stats.successful += result.items.length
+              for (const item of results) {
+                const { result, document = noop } = item
+                if (result.update?.result === 'noop') {
+                  stats.noop++
+                }
+                if (onSuccess != null) onSuccess({ result, document: document() })
+              }
+              return callback(null, [])
+            }
+            const retry = []
+            for (const item of results) {
+              const { result, raw, document = noop } = item
+              const operation = Object.keys(result)[0]
+              // @ts-expect-error
+              const responseItem = result[operation as keyof T.BulkResponseItemContainer]
+              assert(responseItem !== undefined, 'The responseItem is undefined, please file a bug report')
+
+              if (responseItem.status >= 400) {
+                // 429 is the only status code where we might want to retry
+                // a document, because it was not an error in the document itself,
+                // but the ES node was handling too many operations.
+                if (responseItem.status === 429) {
+                  retry.push(raw.action)
+                  /* istanbul ignore next */
+                  if (operation !== 'delete') {
+                    retry.push(raw.document ?? '')
+                  }
+                } else {
+                  onDrop({
+                    status: responseItem.status,
+                    error: responseItem.error ?? null,
+                    operation: serializer.deserialize(raw.action),
+                    // @ts-expect-error
+                    document: document(),
+                    retried: isRetrying
+                  })
+                  stats.failed += 1
+                }
+              } else {
+                stats.successful += 1
+                if (onSuccess != null) onSuccess({ result, document: document() })
+              }
+            }
+            callback(null, retry)
+          })
+          .catch(err => {
+            callback(err, [])
+          })
+      }
+    }
+  }
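// End-to-end sketch for the bulk helper above (same assumed client; documents
// and index are hypothetical). The returned promise resolves to the stats
// object once the datasource has been fully drained and flushed:
const stats = await client.helpers.bulk<{ id: string, title: string }>({
  datasource: [{ id: '1', title: 'hello' }, { id: '2', title: 'world' }],
  onDocument (doc) {
    return { index: { _index: 'my-index', _id: doc.id } }
  },
  onDrop (failure) {
    console.warn('dropped document', failure.status, failure.error)
  },
  refreshOnCompletion: true // refresh _all when done, as implemented above
})
console.log(stats.successful, stats.failed, stats.time)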
+
+  /**
+   * Creates an ES|QL helper instance, to help transform the data returned by an ES|QL query into easy-to-use formats.
+   * @param {object} params - Request parameters sent to esql.query()
+   * @returns {object} EsqlHelper instance
+   */
+  esql (params: T.EsqlQueryRequest, reqOptions: TransportRequestOptions = {}): EsqlHelper {
+    const client = this[kClient]
+
+    function toRecords<TDocument> (response: T.EsqlEsqlResult): TDocument[] {
+      const { columns, values } = response
+      return values.map(row => {
+        const doc: Partial<TDocument> = {}
+        row.forEach((cell, index) => {
+          const { name } = columns[index]
+          // @ts-expect-error
+          doc[name] = cell
+        })
+        return doc as TDocument
+      })
+    }
+
+    const metaHeader = this[kMetaHeader]
+
+    const helper: EsqlHelper = {
+      /**
+       * Pivots ES|QL query results into an array of row objects, rather than the default format where each row is an array of values.
+       */
+      async toRecords<TDocument> (): Promise<EsqlToRecords<TDocument>> {
+        if (metaHeader !== null) {
+          reqOptions.headers = reqOptions.headers ?? {}
+          reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qo`
+        }
+
+        params.format = 'json'
+        params.columnar = false
+        const response = await client.esql.query(params, reqOptions)
+        const records: TDocument[] = toRecords<TDocument>(response)
+        const { columns } = response
+        return { records, columns }
+      },
+
+      async toArrowTable (): Promise<Table<TypeMap>> {
+        if (metaHeader !== null) {
+          reqOptions.headers = reqOptions.headers ?? {}
+          reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qa`
+        }
+
+        params.format = 'arrow'
+
+        // @ts-expect-error the return type will be ArrayBuffer when the format is set to 'arrow'
+        const response: ArrayBuffer = await client.esql.query(params, reqOptions)
+        return tableFromIPC(response)
+      },
+
+      async toArrowReader (): Promise<AsyncRecordBatchStreamReader> {
+        if (metaHeader !== null) {
+          reqOptions.headers = reqOptions.headers ?? {}
+          reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qa`
+          reqOptions.asStream = true
+        }
+
+        params.format = 'arrow'
+
+        // @ts-expect-error response is a Readable when asStream is true
+        const response: Readable = await client.esql.query(params, reqOptions)
+        return await AsyncRecordBatchStreamReader.from(Readable.from(response))
+      }
+    }
+
+    return helper
+  }
+}
+
+// Using a getter will improve the overall performance of the code,
+// as we will read the documents only if needed.
+function addDocumentsGetter (result: TransportResult<T.SearchResponse<unknown>, unknown>): void {
+  Object.defineProperty(result, 'documents', {
+    get () {
+      if (this.body.hits?.hits != null) {
+        // @ts-expect-error
+        return this.body.hits.hits.map(d => d._source)
+      }
+      return []
+    }
+  })
+}
+
+function appendFilterPath (filter: string, params: Record<string, any>, force: boolean): void {
+  if (params.filter_path !== undefined) {
+    params.filter_path += ',' + filter // eslint-disable-line
+  } else if (force) {
+    params.filter_path = filter
+  }
+}
+
+function isReadableStream (obj: any): obj is Readable {
+  return obj != null && typeof obj.pipe === 'function'
+}
+
+function isAsyncIterator (obj: any): obj is AsyncIterator<unknown> {
+  return obj?.[Symbol.asyncIterator] != null
+}
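// Sketch for the ES|QL helper above (same assumed client; the query and index
// are illustrative). A fresh helper is created per output format here, since
// each accessor mutates the shared params (format, columnar, asStream):
const { records, columns } = await client.helpers
  .esql({ query: 'FROM my-index | LIMIT 10' })
  .toRecords<{ title: string }>()
console.log(columns, records)

const table = await client.helpers
  .esql({ query: 'FROM my-index | LIMIT 10' })
  .toArrowTable()
console.log(table.numRows)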
diff --git a/src/sniffingTransport.ts b/src/sniffingTransport.ts
new file mode 100644
index 000000000..389c54c3d
--- /dev/null
+++ b/src/sniffingTransport.ts
@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import assert from 'node:assert'
+import { Transport, SniffOptions } from '@elastic/transport'
+
+export default class SniffingTransport extends Transport {
+  sniff (opts: SniffOptions): void {
+    if (this.isSniffing) return
+    this.isSniffing = true
+
+    const request = {
+      method: 'GET',
+      path: this.sniffEndpoint ?? '/_nodes/_all/http'
+    }
+
+    this.request(request, { id: opts.requestId, meta: true })
+      .then(result => {
+        assert(isObject(result.body), 'The body should be an object')
+        this.isSniffing = false
+        const protocol = result.meta.connection?.url.protocol ??
+          /* istanbul ignore next */ 'http:'
+        const hosts = this.connectionPool.nodesToHost(result.body.nodes, protocol)
+        this.connectionPool.update(hosts)
+
+        result.meta.sniff = { hosts, reason: opts.reason }
+        this.diagnostic.emit('sniff', null, result)
+      })
+      .catch(err => {
+        this.isSniffing = false
+        err.meta.sniff = { hosts: [], reason: opts.reason }
+        this.diagnostic.emit('sniff', err, null)
+      })
+  }
+}
+
+function isObject (obj: any): obj is Record<string, unknown> {
+  return typeof obj === 'object'
+}
diff --git a/test/behavior/observability.test.js b/test/behavior/observability.test.js deleted file mode 100644 index 65c16ca6d..000000000 --- a/test/behavior/observability.test.js +++ /dev/null @@ -1,334 +0,0 @@ -'use strict' - -const { test } = require('tap') -const lolex = require('lolex') -const { Client, Transport } = require('../../index') -const { - connection: { MockConnection, MockConnectionSniff } -} = require('../utils') -const noop = () => {} - -test('Request id', t => { - t.test('Default generateRequestId', t => { - const { generateRequestId } = Transport.internals - t.type(generateRequestId, 'function') - - const genReqId = generateRequestId() - t.type(genReqId, 'function') - - for (var i = 1; i <= 10; i++) { - t.strictEqual(genReqId(), i) - } - - t.end() - }) - - t.test('Custom generateRequestId', t => { - t.plan(7) - - const options = { context: { winter: 'is coming' } } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - generateRequestId: function (requestParams, requestOptions) { - t.match(requestParams, { method: 'GET', path: '/' }) - t.match(requestOptions, options) - return 'custom-id' - } - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.request.id, 'custom-id') - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.request.id, 'custom-id') - }) - - client.info({}, options, t.error) - }) - - t.test('Custom request id in method options', t => { - t.plan(5) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.request.id, 'custom-id') - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.request.id, 'custom-id') - }) - - client.info({}, { id: 'custom-id' }, t.error) - }) - - t.test('Sniff and correlation id', t => { - t.test('sniffOnStart - should autogenerate the id', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionSniff, - sniffOnStart: true - }) - - client.on('sniff', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.request.id, 1) - }) - }) - - t.test('sniffOnConnectionFault - should use the request id', t => { - t.plan(5) - - const client = new Client({ - nodes: ['http://localhost:9200/', 'http://localhost:9201/'], - Connection: MockConnectionSniff, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - client.on('request', (e, { meta }) => { - t.strictEqual(meta.request.id, 'custom') - }) - - client.on('response', (e, { meta }) => { - t.strictEqual(meta.request.id, 'custom') - }) - - client.on('sniff', (e, { meta }) => { - t.strictEqual(meta.request.id, 'custom') - }) - - client.transport.request({ - path: '/500', - method: 'GET' - }, { - id: 'custom', - headers: { timeout: 'true' } - }, noop) - }) - - t.end() - }) - - t.test('Resurrect should use the same request id of the request that 
starts it', t => { - t.plan(2) - - const clock = lolex.install({ toFake: ['Date'] }) - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - const conn = client.connectionPool.getConnection() - client.connectionPool.markDead(conn) - clock.tick(1000 * 61) - - client.on('resurrect', (err, meta) => { - t.error(err) - t.strictEqual(meta.request.id, 'custom') - clock.uninstall() - }) - - client.info({}, { id: 'custom' }, noop) - }) - - t.end() -}) - -test('Request context', t => { - t.test('no value', t => { - t.plan(5) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.context, null) - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.context, null) - }) - - client.info(t.error) - }) - - t.test('custom value', t => { - t.plan(5) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.deepEqual(meta.context, { winter: 'is coming' }) - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.deepEqual(meta.context, { winter: 'is coming' }) - }) - - client.info({}, { context: { winter: 'is coming' } }, t.error) - }) - - t.end() -}) - -test('Client name', t => { - t.test('Property of the client instance', t => { - const client = new Client({ - node: 'http://localhost:9200/', - name: 'cluster' - }) - t.strictEqual(client.name, 'cluster') - t.end() - }) - - t.test('Is present in the event metadata', t => { - t.plan(6) - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - name: 'cluster' - }) - - client.on('request', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.name, 'cluster') - }) - - client.on('response', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.name, 'cluster') - }) - - client.info((err, { meta }) => { - t.error(err) - t.strictEqual(meta.name, 'cluster') - }) - }) - - t.test('Sniff and client name', t => { - t.test('sniffOnStart', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionSniff, - sniffOnStart: true - }) - - client.on('sniff', (err, { meta }) => { - t.error(err) - t.strictEqual(meta.name, 'elasticsearch-js') - }) - }) - - t.test('sniffOnConnectionFault', t => { - t.plan(5) - - const client = new Client({ - nodes: ['http://localhost:9200/', 'http://localhost:9201/'], - Connection: MockConnectionSniff, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - client.on('request', (e, { meta }) => { - t.strictEqual(meta.name, 'elasticsearch-js') - }) - - client.on('response', (e, { meta }) => { - t.strictEqual(meta.name, 'elasticsearch-js') - }) - - client.on('sniff', (e, { meta }) => { - t.strictEqual(meta.name, 'elasticsearch-js') - }) - - client.transport.request({ - path: '/500', - method: 'GET' - }, { - headers: { timeout: 'true' } - }, noop) - }) - - t.end() - }) - - t.test('Resurrect should have the client name configured', t => { - t.plan(2) - - const clock = lolex.install({ toFake: ['Date'] }) - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - const conn = client.connectionPool.getConnection() - 
client.connectionPool.markDead(conn) - clock.tick(1000 * 61) - - client.on('resurrect', (err, meta) => { - t.error(err) - t.strictEqual(meta.name, 'elasticsearch-js') - clock.uninstall() - }) - - client.info({}, { id: 'custom' }, noop) - }) - - t.test('Resurrect should have the client name configured (child client)', t => { - t.plan(2) - - const clock = lolex.install({ toFake: ['Date'] }) - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - sniffOnConnectionFault: true, - maxRetries: 0 - }) - - const child = client.child({ - name: 'child-client' - }) - - const conn = client.connectionPool.getConnection() - client.connectionPool.markDead(conn) - clock.tick(1000 * 61) - - client.on('resurrect', (err, meta) => { - t.error(err) - t.strictEqual(meta.name, 'child-client') - clock.uninstall() - }) - - child.info({}, { id: 'custom' }, noop) - }) - - t.end() -}) diff --git a/test/behavior/resurrect.test.js b/test/behavior/resurrect.test.js deleted file mode 100644 index 28ba78e87..000000000 --- a/test/behavior/resurrect.test.js +++ /dev/null @@ -1,198 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const lolex = require('lolex') -const workq = require('workq') -const { buildCluster } = require('../utils') -const { Client, events } = require('../../index') - -/** - * The aim of this test is to verify how the resurrect logic behaves - * in a multi node situation. - * The `buildCluster` utility can boot an arbitrary number - * of nodes, that you can kill or spawn at your will. - * The resurrect API can be tested with its callback - * or by using the `resurrect` event (to handle automatically - * triggered resurrections). - */ - -test('Should execute the recurrect API with the ping strategy', t => { - t.plan(8) - - const clock = lolex.install({ toFake: ['Date'] }) - const q = workq() - - buildCluster({ numberOfNodes: 2 }, cluster => { - const client = new Client({ - nodes: [{ - url: new URL(cluster.nodes[Object.keys(cluster.nodes)[0]].url), - id: 'node0' - }, { - url: new URL(cluster.nodes[Object.keys(cluster.nodes)[1]].url), - id: 'node1' - }], - maxRetries: 0 - }) - - client.on(events.RESURRECT, (err, meta) => { - t.error(err) - t.strictEqual(meta.strategy, 'ping') - t.false(meta.isAlive) - t.strictEqual(meta.connection.id, 'node0') - t.strictEqual(meta.name, 'elasticsearch-js') - t.deepEqual(meta.request, { id: 2 }) - }) - - q.add((q, done) => { - cluster.kill('node0', done) - }) - - q.add((q, done) => { - client.info((err, result) => { - t.ok(err) - done() - }) - }) - - q.add((q, done) => { - clock.tick(1000 * 61) - client.info((err, result) => { - t.error(err) - done() - }) - }) - - t.teardown(() => { - clock.uninstall() - cluster.shutdown() - }) - }) -}) - -test('Resurrect a node and handle 502/3/4 status code', t => { - t.plan(15) - - const clock = lolex.install({ toFake: ['Date'] }) - const q = workq() - - var count = 0 - function handler (req, res) { - res.statusCode = count++ < 2 ? 
502 : 200 - res.setHeader('content-type', 'application/json') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildCluster({ handler, numberOfNodes: 2 }, ({ nodes, shutdown }) => { - const client = new Client({ - nodes: [{ - url: new URL(nodes[Object.keys(nodes)[0]].url), - id: 'node0' - }, { - url: new URL(nodes[Object.keys(nodes)[1]].url), - id: 'node1' - }], - maxRetries: 0 - }) - - var idCount = 2 - client.on(events.RESURRECT, (err, meta) => { - t.error(err) - t.strictEqual(meta.strategy, 'ping') - t.strictEqual(meta.connection.id, 'node0') - t.strictEqual(meta.name, 'elasticsearch-js') - t.deepEqual(meta.request, { id: idCount++ }) - if (count < 4) { - t.false(meta.isAlive) - } else { - t.true(meta.isAlive) - } - }) - - q.add((q, done) => { - client.info((err, result) => { - t.ok(err) - done() - }) - }) - - q.add((q, done) => { - clock.tick(1000 * 61) - client.info((err, result) => { - t.error(err) - done() - }) - }) - - q.add((q, done) => { - clock.tick(1000 * 10 * 60) - client.info((err, result) => { - t.error(err) - done() - }) - }) - - t.teardown(() => { - clock.uninstall() - shutdown() - }) - }) -}) - -test('Should execute the recurrect API with the optimistic strategy', t => { - t.plan(8) - - const clock = lolex.install({ toFake: ['Date'] }) - const q = workq() - - buildCluster({ numberOfNodes: 2 }, cluster => { - const client = new Client({ - nodes: [{ - url: new URL(cluster.nodes[Object.keys(cluster.nodes)[0]].url), - id: 'node0' - }, { - url: new URL(cluster.nodes[Object.keys(cluster.nodes)[1]].url), - id: 'node1' - }], - maxRetries: 0, - resurrectStrategy: 'optimistic' - }) - - client.on(events.RESURRECT, (err, meta) => { - t.error(err) - t.strictEqual(meta.strategy, 'optimistic') - t.true(meta.isAlive) - t.strictEqual(meta.connection.id, 'node0') - t.strictEqual(meta.name, 'elasticsearch-js') - t.deepEqual(meta.request, { id: 2 }) - }) - - q.add((q, done) => { - cluster.kill('node0', done) - }) - - q.add((q, done) => { - client.info((err, result) => { - t.ok(err) - done() - }) - }) - - q.add((q, done) => { - clock.tick(1000 * 61) - client.info((err, result) => { - t.error(err) - done() - }) - }) - - t.teardown(() => { - clock.uninstall() - cluster.shutdown() - }) - }) -}) diff --git a/test/behavior/sniff.test.js b/test/behavior/sniff.test.js deleted file mode 100644 index 5bbb2a438..000000000 --- a/test/behavior/sniff.test.js +++ /dev/null @@ -1,281 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const lolex = require('lolex') -const workq = require('workq') -const { buildCluster } = require('../utils') -const { Client, Connection, Transport, events, errors } = require('../../index') - -/** - * The aim of this test is to verify how the sniffer behaves - * in a multi node situation. - * The `buildCluster` utility can boot an arbitrary number - * of nodes, that you can kill or spawn at your will. - * The sniffer component can be tested with its callback - * or by using the `sniff` event (to handle automatically - * triggered sniff). 
- */ - -test('Should update the connection pool', t => { - t.plan(10) - - buildCluster(({ nodes, shutdown }) => { - const client = new Client({ - node: nodes[Object.keys(nodes)[0]].url - }) - t.strictEqual(client.connectionPool.size, 1) - - client.on(events.SNIFF, (err, request) => { - t.error(err) - t.strictEqual( - request.meta.sniff.reason, - Transport.sniffReasons.DEFAULT - ) - }) - - // run the sniffer - client.transport.sniff((err, hosts) => { - t.error(err) - t.strictEqual(hosts.length, 4) - - const ids = Object.keys(nodes) - for (var i = 0; i < hosts.length; i++) { - const id = ids[i] - // the first node will be an update of the existing one - if (id === 'node0') { - t.deepEqual(hosts[i], { - url: new URL(nodes[id].url), - id: id, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - } else { - t.deepEqual(hosts[i], { - url: new URL(nodes[id].url), - id: id, - roles: { - master: true, - data: true, - ingest: true, - ml: false - }, - ssl: null, - agent: null - }) - } - } - - t.strictEqual(client.connectionPool.size, 4) - }) - t.teardown(shutdown) - }) -}) - -test('Should handle hostnames in publish_address', t => { - t.plan(10) - - buildCluster({ hostPublishAddress: true }, ({ nodes, shutdown }) => { - const client = new Client({ - node: nodes[Object.keys(nodes)[0]].url - }) - t.strictEqual(client.connectionPool.size, 1) - - client.on(events.SNIFF, (err, request) => { - t.error(err) - t.strictEqual( - request.meta.sniff.reason, - Transport.sniffReasons.DEFAULT - ) - }) - - // run the sniffer - client.transport.sniff((err, hosts) => { - t.error(err) - t.strictEqual(hosts.length, 4) - - for (var i = 0; i < hosts.length; i++) { - // the first node will be an update of the existing one - t.strictEqual(hosts[i].url.hostname, 'localhost') - } - - t.strictEqual(client.connectionPool.size, 4) - }) - t.teardown(shutdown) - }) -}) - -test('Sniff interval', t => { - t.plan(11) - const clock = lolex.install({ toFake: ['Date'] }) - const q = workq() - - buildCluster(({ nodes, shutdown, kill }) => { - const client = new Client({ - node: nodes[Object.keys(nodes)[0]].url, - sniffInterval: 50 - }) - - // this event will be triggered by api calls - client.on(events.SNIFF, (err, request) => { - t.error(err) - const { hosts, reason } = request.meta.sniff - t.strictEqual( - client.connectionPool.size, - hosts.length - ) - t.strictEqual(reason, Transport.sniffReasons.SNIFF_INTERVAL) - }) - - t.strictEqual(client.connectionPool.size, 1) - - q.add((q, done) => { - clock.tick(51) - client.info(err => { - t.error(err) - waitSniffEnd(() => { - t.strictEqual(client.connectionPool.size, 4) - done() - }) - }) - }) - - q.add((q, done) => { - kill('node1', done) - }) - - q.add((q, done) => { - clock.tick(51) - client.info(err => { - t.error(err) - waitSniffEnd(() => { - t.strictEqual(client.connectionPool.size, 3) - done() - }) - }) - }) - - t.teardown(shutdown) - - // it can happen that the sniff operation resolves - // after the API call that trioggered it, so to - // be sure that we are checking the connectionPool size - // at the right moment, we verify that the transport - // is no longer sniffing - function waitSniffEnd (callback) { - if (client.transport._isSniffing) { - setTimeout(waitSniffEnd, 500, callback) - } else { - callback() - } - } - }) -}) - -test('Sniff on start', t => { - t.plan(4) - - buildCluster(({ nodes, shutdown, kill }) => { - const client = new Client({ - node: nodes[Object.keys(nodes)[0]].url, - sniffOnStart: true - }) - - client.on(events.SNIFF, (err, request) 
=> { - t.error(err) - const { hosts, reason } = request.meta.sniff - t.strictEqual( - client.connectionPool.size, - hosts.length - ) - t.strictEqual(reason, Transport.sniffReasons.SNIFF_ON_START) - }) - - t.strictEqual(client.connectionPool.size, 1) - t.teardown(shutdown) - }) -}) - -test('Should not close living connections', t => { - t.plan(3) - - buildCluster(({ nodes, shutdown, kill }) => { - class MyConnection extends Connection { - close () { - t.fail('Should not be called') - } - } - - const client = new Client({ - node: { - url: new URL(nodes[Object.keys(nodes)[0]].url), - id: 'node1' - }, - Connection: MyConnection - }) - - t.strictEqual(client.connectionPool.size, 1) - client.transport.sniff((err, hosts) => { - t.error(err) - t.strictEqual( - client.connectionPool.size, - hosts.length - ) - }) - - t.teardown(shutdown) - }) -}) - -test('Sniff on connection fault', t => { - t.plan(5) - - buildCluster(({ nodes, shutdown, kill }) => { - class MyConnection extends Connection { - request (params, callback) { - if (this.id === 'http://localhost:9200/') { - callback(new errors.ConnectionError('kaboom'), null) - return {} - } else { - return super.request(params, callback) - } - } - } - - const client = new Client({ - nodes: [ - 'http://localhost:9200/', - nodes[Object.keys(nodes)[0]].url - ], - maxRetries: 0, - sniffOnConnectionFault: true, - Connection: MyConnection - }) - - t.strictEqual(client.connectionPool.size, 2) - // this event will be triggered by the connection fault - client.on(events.SNIFF, (err, request) => { - t.error(err) - const { hosts, reason } = request.meta.sniff - t.strictEqual( - client.connectionPool.size, - hosts.length - ) - t.strictEqual(reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT) - }) - - client.info((err, result) => { - t.ok(err instanceof errors.ConnectionError) - }) - - t.teardown(shutdown) - }) -}) diff --git a/test/benchmarks/macro/complex.bench.js b/test/benchmarks/macro/complex.bench.js deleted file mode 100644 index 00cb6c47f..000000000 --- a/test/benchmarks/macro/complex.bench.js +++ /dev/null @@ -1,101 +0,0 @@ -'use strict' - -// This file must be run with --max-old-space-size=8192 -// because we need more than 1Gb of memory -// eg: node --max-old-space-size=8192 complex.bench.js - -const { Client } = require('../../../index') -const { statSync, createReadStream } = require('fs') -const { join } = require('path') -const split = require('split2') -const { bench, beforeEach, afterEach } = require('../suite')({ - report: { - url: process.env.ES_RESULT_CLUSTER_URL, - username: process.env.ES_RESULT_CLUSTER_USERNAME, - password: process.env.ES_RESULT_CLUSTER_PASSWORD - } -}) - -var stackoverflow = [] -const stackoverflowPath = join( - __dirname, - 'fixtures', - 'stackoverflow.json' -) -const stackoverflowInfo = { - name: 'stackoverflow.json', - size: statSync(join(stackoverflowPath)).size, - num_documents: 2000000 -} - -const INDEX = 'stackoverflow' -const node = process.env.ELASTICSEARCH_URL || 'http://localhost:9200/' - -const client = new Client({ node }) - -beforeEach(async b => { - if (stackoverflow.length === 0) { - stackoverflow = await readSOfile() - } - b.client = client - await b.client.indices.delete({ index: 'test-*' }) -}) - -afterEach(async b => { - await b.client.indices.delete({ index: 'test-*' }) -}) - -bench('Bulk index documents', { - warmup: 1, - measure: 1, - iterations: 1, - dataset: stackoverflowInfo, - action: 'bulk' -}, async b => { - b.start() - for (var i = 0; i < stackoverflow.length; 
i++) { - await b.client.bulk({ body: stackoverflow[i] }) - } - b.end() -}) - -bench('Complex search request', { - warmup: 3, - measure: 5, - iterations: 100, - dataset: stackoverflowInfo, - action: 'search' -}, async b => { - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.search({ - index: INDEX, - body: { - query: { - match: { title: 'safe' } - } - } - }) - } - b.end() -}) - -function readSOfile () { - var i = 0 - var stackoverflow = [] - return new Promise((resolve, reject) => { - createReadStream(stackoverflowPath) - .pipe(split(JSON.parse)) - .on('data', chunk => { - stackoverflow[i] = stackoverflow[i] || [] - stackoverflow[i].push({ index: { _index: INDEX } }) - stackoverflow[i].push(chunk) - // 10k documents - if (stackoverflow[i].length >= 10000 * 2) { - i++ - } - }) - .on('error', reject) - .on('end', () => resolve(stackoverflow)) - }) -} diff --git a/test/benchmarks/macro/simple.bench.js b/test/benchmarks/macro/simple.bench.js deleted file mode 100644 index f734d1ff2..000000000 --- a/test/benchmarks/macro/simple.bench.js +++ /dev/null @@ -1,269 +0,0 @@ -'use strict' - -const { Client } = require('../../../index') -const { statSync } = require('fs') -const { join } = require('path') -const { bench, beforeEach, afterEach } = require('../suite')({ - report: { - url: process.env.ES_RESULT_CLUSTER_URL, - username: process.env.ES_RESULT_CLUSTER_USERNAME, - password: process.env.ES_RESULT_CLUSTER_PASSWORD - } -}) - -const node = process.env.ELASTICSEARCH_URL || 'http://localhost:9200/' - -const smallDocument = require('./fixtures/small_document.json') -const smallDocumentInfo = { - name: 'small_document.json', - size: statSync(join(__dirname, 'fixtures', 'small_document.json')).size, - num_documents: 1 -} -const largeDocument = require('./fixtures/large_document.json') -const largeDocumentInfo = { - name: 'large_document.json', - size: statSync(join(__dirname, 'fixtures', 'large_document.json')).size, - num_documents: 1 -} - -const client = new Client({ node }) - -beforeEach(async b => { - b.client = client - await b.client.indices.delete({ index: 'test-*' }) -}) - -afterEach(async b => { - await b.client.indices.delete({ index: 'test-*' }) -}) - -bench('Ping', { - warmup: 3, - measure: 5, - iterations: 100, - action: 'ping' -}, async b => { - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.ping() - } - b.end() -}) - -bench('Create index', { - warmup: 3, - measure: 5, - iterations: 10, - action: 'indices.create' -}, async b => { - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.indices.create({ index: `test-create-${i}` }) - } - b.end() -}) - -bench('Index small document', { - warmup: 3, - measure: 5, - iterations: 100, - dataset: smallDocumentInfo, - action: 'create' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.create({ - index, - type: '_doc', - id: i + now, - body: smallDocument - }) - } - b.end() -}) - -bench('Index large document', { - warmup: 3, - measure: 5, - iterations: 100, - dataset: largeDocumentInfo, - action: 'create' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.create({ - index, - type: '_doc', - id: i + now, - body: largeDocument - }) - } - b.end() -}) - -bench('Get small document', { - warmup: 3, - 
measure: 5, - iterations: 1000, - dataset: smallDocumentInfo, - action: 'get' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - body: smallDocument - }) - - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.get({ - index, - type: '_doc', - id: now - }) - } - b.end() -}) - -bench('Get large document', { - warmup: 3, - measure: 5, - iterations: 1000, - dataset: largeDocumentInfo, - action: 'get' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - body: largeDocument - }) - - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.get({ - index, - type: '_doc', - id: now - }) - } - b.end() -}) - -bench('Search small document', { - warmup: 3, - measure: 5, - iterations: 1000, - dataset: smallDocumentInfo, - action: 'search' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - refresh: true, - body: smallDocument - }) - - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.search({ - index, - type: '_doc', - body: { - query: { - match: { cuisine: 'mexican' } - } - } - }) - } - b.end() -}) - -bench('Search large document', { - warmup: 3, - measure: 5, - iterations: 1000, - dataset: largeDocumentInfo, - action: 'search' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - refresh: true, - body: largeDocument - }) - - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.search({ - index, - type: '_doc', - body: { - query: { - match: { 'user.lang': 'en' } - } - } - }) - } - b.end() -}) - -bench('Update small document', { - warmup: 3, - measure: 5, - iterations: 100, - dataset: smallDocumentInfo, - action: 'update' -}, async b => { - const now = Date.now() + '' - const index = `test-${now}` - await b.client.indices.create({ index }) - - await b.client.create({ - index, - type: '_doc', - id: now, - refresh: true, - body: smallDocument - }) - - b.start() - for (var i = 0; i < b.iterations; i++) { - await b.client.update({ - index, - type: '_doc', - id: now, - body: { - doc: { cuisine: 'italian' + i } - } - }) - } - b.end() -}) diff --git a/test/benchmarks/micro/basic.bench.js b/test/benchmarks/micro/basic.bench.js deleted file mode 100644 index d829945ca..000000000 --- a/test/benchmarks/micro/basic.bench.js +++ /dev/null @@ -1,98 +0,0 @@ -'use strict' - -const { bench } = require('../suite')({ - report: { - url: process.env.ES_RESULT_CLUSTER_URL, - username: process.env.ES_RESULT_CLUSTER_USERNAME, - password: process.env.ES_RESULT_CLUSTER_PASSWORD - } -}) -const { Client } = require('../../../index') -const { connection } = require('../../utils') - -bench('Initialization', { warmup: 5, measure: 10, iterations: 1000 }, async b => { - b.start() - for (var i = 0; i < b.iterations; i++) { - const client = new Client({ // eslint-disable-line - node: 'http://localhost:9200/' - }) - } - b.end() -}) - -bench('Call api with lazy loading', { warmup: 5, measure: 10 }, async b => { - const client = new Client({ - node: 'http://localhost:9200/', - Connection: connection.MockConnection - }) - - b.start() 
- await client.info() - b.end() -}) - -bench('Call api without lazy loading', { warmup: 5, measure: 10 }, async b => { - const client = new Client({ - node: 'http://localhost:9200/', - Connection: connection.MockConnection - }) - - await client.info() - b.start() - await client.info() - b.end() -}) - -bench('Basic get', { warmup: 5, measure: 10, iterations: 1000 }, async b => { - const client = new Client({ - node: 'http://localhost:9200/', - Connection: connection.MockConnection - }) - - // we run the method twice to skip the lazy loading overhead - await client.search({ - index: 'test', - type: 'doc', - q: 'foo:bar' - }) - b.start() - for (var i = 0; i < b.iterations; i++) { - await client.search({ - index: 'test', - type: 'doc', - q: 'foo:bar' - }) - } - b.end() -}) - -bench('Basic post', { warmup: 5, measure: 10, iterations: 1000 }, async b => { - const client = new Client({ - node: 'http://localhost:9200/', - Connection: connection.MockConnection - }) - - // we run the method twice to skip the lazy loading overhead - await client.search({ - index: 'test', - type: 'doc', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - b.start() - for (var i = 0; i < b.iterations; i++) { - await client.search({ - index: 'test', - type: 'doc', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - } - b.end() -}) diff --git a/test/benchmarks/suite.js b/test/benchmarks/suite.js deleted file mode 100644 index 251e58749..000000000 --- a/test/benchmarks/suite.js +++ /dev/null @@ -1,272 +0,0 @@ -'use strict' - -const { Client } = require('../../index') -const clientVersion = require('../../package.json').version -const { EventEmitter } = require('events') -const os = require('os') -const dezalgo = require('dezalgo') -const convertHrtime = require('convert-hrtime') -const Git = require('simple-git/promise') -const workq = require('workq') -const dedent = require('dedent') -const ss = require('simple-statistics') - -function buildBenchmark (options = {}) { - const q = workq() - const stats = {} - const reports = [] - var beforeEach = null - var afterEach = null - var setup = null - var teardown = null - - function setBeforeEach (fn) { - beforeEach = fn - } - - function setAfterEach (fn) { - afterEach = fn - } - - function setSetup (fn) { - setup = fn - } - - function setTeardown (fn) { - teardown = fn - } - - function runSetup (q, done) { - if (setup !== null) { - setup(() => { - setup = null - done() - }) - } else { - done() - } - } - - function benchmark (title, opts, fn) { - if (fn == null) { - fn = opts - opts = {} - } - - stats[title] = [] - var { measure, warmup } = opts - const b = new B({ iterations: opts.iterations }) - - q.add(runSetup) - q.add(runBenchmark) - q.add(elaborateStats) - - // Task that runs the benchmark and collects the stats - function runBenchmark (q, done) { - b.comment(`\n# ${title}`) - b.once('fail', err => { - b.comment(err) - if (b.client) { - b.client.close(done) - } else { - done() - } - }) - - process.nextTick(run) - async function run () { - if (beforeEach) { - try { - await beforeEach(b) - } catch (err) { - b.comment('Error: beforeEach hook has failed') - return b.fail(err) - } - } - - try { - await fn(b) - } catch (err) { - return b.fail(err) - } - - if (afterEach) { - try { - await afterEach(b) - } catch (err) { - b.comment('Error: afterEach hook has failed') - return b.fail(err) - } - } - - // still need to warmup - if (warmup-- > 0) { - process.nextTick(run) - // save the actual measure - } else if (measure-- > 0) { - 
stats[title].push(convertHrtime(b.time)) - process.nextTick(run) - // calculate the statistics - } else { - done() - } - } - } - - // task that elaborate the collected stats - async function elaborateStats (q) { - const times = stats[title].map(s => s.milliseconds / b.iterations) - reports.push({ - description: title, - action: opts.action, - category: opts.category || 'simple', - dataset: opts.dataset || null, - stats: { - mean: ss.mean(times), - median: ss.median(times), - min: ss.min(times), - max: ss.max(times), - standard_deviation: ss.standardDeviation(times) - }, - repetitions: { - measured: opts.measure, - warmup: opts.warmup, - iterations: opts.iterations - } - }) - - if (b.client) { - const { body } = await b.client.nodes.stats({ metric: 'http,jvm,os' }) - const esStats = body.nodes[Object.keys(body.nodes)[0]] - b.comment(dedent` - mean: ${ss.mean(times)} ms - median: ${ss.median(times)} ms - min: ${ss.min(times)} ms - max: ${ss.max(times)} ms - standard deviation: ${ss.standardDeviation(times)} - http total connections: ${esStats.http.total_opened} - jvm heap used: ${esStats.jvm.mem.heap_used_percent}% - `) - } else { - b.comment(dedent` - mean: ${ss.mean(times)} ms - median: ${ss.median(times)} ms - min: ${ss.min(times)} ms - max: ${ss.max(times)} ms - standard deviation: ${ss.standardDeviation(times)} - `) - } - } - } - - q.drain(done => { - if (teardown) { - teardown(done) - } else { - done() - } - if (options.report && options.report.url) { - sendReport() - } - }) - - async function sendReport () { - const client = new Client({ - node: { - url: new URL(options.report.url), - username: options.report.username, - password: options.report.password - } - }) - const git = Git(__dirname) - const commit = await git.log(['-1']) - const branch = await git.revparse(['--abbrev-ref', 'HEAD']) - const { body: esInfo } = await client.info() - const { body: esNodes } = await client.nodes.stats({ metric: 'os' }) - - const results = reports.map(report => { - return { - '@timestamp': new Date(), - event: { - description: report.description, - category: report.category, - action: report.action, - duration: 0, - statistics: report.stats, - repetitions: report.repetitions, - dataset: (report.dataset && report.dataset.name) || null, - dataset_details: { - size: (report.dataset && report.dataset.size) || 0, - num_documents: (report.dataset && report.dataset.num_documents) || 0 - } - }, - agent: { - version: clientVersion, - name: '@elastic/elasticsearch-js', - git: { - branch: branch.slice(0, -1), - sha: commit.latest.hash, - commit_message: commit.latest.message, - repository: 'elasticsearch-js' - }, - language: { - version: process.version - }, - os: { - platform: `${os.platform()} ${os.release()}`, - type: os.type(), - architecture: os.arch() - } - }, - server: { - version: esInfo.version.number, - nodes_info: esNodes - } - } - }) - - for (var i = 0; i < results.length; i++) { - await client.index({ - index: 'benchmarking_results', - type: '_doc', - body: results[i] - }) - } - } - - return { - bench: dezalgo(benchmark), - beforeEach: setBeforeEach, - afterEach: setAfterEach, - setup: setSetup, - teardown: setTeardown - } -} - -class B extends EventEmitter { - constructor (opts) { - super() - this.begin = 0 - this.time = 0 - this.iterations = opts.iterations || 1 - this.client = null - } - - start () { - this.begin = process.hrtime() - } - - end () { - this.time = process.hrtime(this.begin) - } - - fail (err) { - this.emit('fail', err) - } - - comment (...args) { - console.log(...args) - } -} - 
-module.exports = buildBenchmark diff --git a/test/esm/package.json b/test/esm/package.json new file mode 100644 index 000000000..5209563e8 --- /dev/null +++ b/test/esm/package.json @@ -0,0 +1,7 @@ +{ + "name": "esm", + "version": "1.0.0", + "dependencies": { + "@elastic/elasticsearch": "file:../.." + } +} diff --git a/test/esm/test-import.mjs b/test/esm/test-import.mjs new file mode 100644 index 000000000..693ac3e18 --- /dev/null +++ b/test/esm/test-import.mjs @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import { Client } from '@elastic/elasticsearch' + +new Client({ + node: 'http://localhost:9200/', + auth: { + username: 'elastic', + password: 'changeme', + } +}) diff --git a/test/integration/README.md b/test/integration/README.md index 0861dd8b9..3b0218f7c 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -5,8 +5,8 @@ Yes. ## Background -Elasticsearch offers its entire API via HTTP REST endpoints. You can find the whole API specification for every version [here](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/api).
-To support different languages at the same time, the Elasticsearch team decided to provide a [YAML specification](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/test) to test every endpoint, body, headers, warning, error and so on.
+Elasticsearch offers its entire API via HTTP REST endpoints. You can find the whole API specification for every version [here](https://github.com/elastic/elasticsearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/api).
+To support different languages at the same time, the Elasticsearch team decided to provide a [YAML specification](https://github.com/elastic/elasticsearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/test) to test every endpoint, body, header, warning, and error.
This testing suite uses that specification to generate the test for the specified version of Elasticsearch on the fly. ## Run @@ -20,20 +20,45 @@ Once the Elasticsearch repository has been cloned, the testing suite will connec The specification does not allow the test to be run in parallel, so it might take a while to run the entire testing suite; on my machine, `MacBookPro15,2 core i7 2.7GHz 16GB of RAM` it takes around four minutes. +### Running locally + +If you want to run the integration tests on your development machine, you must have an Elasticsearch instance running first. +A local instance can be spun up in a Docker container by running the [`.buildkite/run-elasticsearch.sh`](/.buildkite/run-elasticsearch.sh) script. +This is the same script CI jobs use to run Elasticsearch for integration tests, so your results should be relatively consistent. + +To simplify the process of starting a container, testing, and cleaning up the container, you can run the `make integration` target: + +```sh +# set some parameters +export STACK_VERSION=8.7.0 +export TEST_SUITE=free # can be `free` or `platinum` +make integration +``` + +If Elasticsearch doesn't come up, run `make integration-cleanup` and then `DETACH=false .buildkite/run-elasticsearch.sh` manually to read the startup logs. + +If you get an error about `vm.max_map_count` being too low, run `sudo sysctl -w vm.max_map_count=262144` to update the setting until the next reboot, or `sudo sysctl -w vm.max_map_count=262144; echo 'vm.max_map_count=262144' | sudo tee -a /etc/sysctl.conf` to update the setting permanently. + ### Exit on the first failure -Bu default the suite will run all the test, even if one assertion has failed. If you want to stop the test at the first failure, use the bailout option: + +By default the suite will run all the tests, even if one assertion has failed. If you want to stop the test at the first failure, use the bailout option: + ```sh npm run test:integration -- --bail ``` ### Calculate the code coverage + If you want to calculate the code coverage just run the testing suite with the following parameters, once the test ends, it will open a browser window with the results. + ```sh npm run test:integration -- --cov --coverage-report=html ``` ## How does this thing work? + At first sight, it might seem complicated, but once you understand what the moving parts are, it's quite easy. + 1. Connects to the given Elasticsearch instance 1. Gets the ES version and build hash 1. Checkout to the given hash (and clone the repository if it is not present) @@ -46,7 +71,4 @@ At first sight, it might seem complicated, but once you understand what the movi Inside the `index.js` file, you will find the connection, cloning, reading and parsing part of the test, while inside the `test-runner.js` file you will find the function to handle the assertions. Inside `test-runner.js`, we use a [queue](https://github.com/delvedor/workq) to be sure that everything is run in the correct order. -Checkout the [rest-api-spec readme](https://github.com/elastic/elasticsearch/blob/master/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc) if you want to know more about how the assertions work. - -#### Why are we running the test with the `--harmony` flag? -Because on Node v6 the regex lookbehinds are not supported. +Check out the [rest-api-spec readme](https://github.com/elastic/elasticsearch/blob/main/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc) if you want to know more about how the assertions work. 
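A note on the YAML-to-test translation described above: a single spec file may contain several YAML documents separated by `---`, and each document becomes its own test (or a special `setup`/`teardown` block). Below is a minimal sketch of that parsing step, mirroring the splitting logic the test builder uses internally; the file path is only an illustrative example.

```js
'use strict'

const { readFileSync } = require('node:fs')
const yaml = require('js-yaml')

// Illustrative path; the real files come from the downloaded rest-api-spec artifacts.
const data = readFileSync('indices.create/10_basic.yml', 'utf8')

// A file may hold multiple YAML documents separated by '---': split on the
// separator, drop empty chunks, parse each document, and drop failed parses.
const tests = data
  .split('\n---\n')
  .map(s => s.trim())
  .filter(Boolean)
  .map(doc => yaml.load(doc, { schema: yaml.CORE_SCHEMA }))
  .filter(Boolean)

console.log(`parsed ${tests.length} YAML documents`)
```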
diff --git a/test/integration/helper.js b/test/integration/helper.js
index cc1a90d93..bfe2535fa 100644
--- a/test/integration/helper.js
+++ b/test/integration/helper.js
@@ -1,9 +1,13 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */

 'use strict'

+const assert = require('node:assert')
+const fetch = require('node-fetch')
+
 function runInParallel (client, operation, options, clientOptions) {
   if (options.length === 0) return Promise.resolve()
   const operations = options.map(opts => {
@@ -34,4 +38,87 @@ function to (promise) {

 const sleep = ms => new Promise(resolve => setTimeout(resolve, ms))

-module.exports = { runInParallel, delve, to, sleep }
+function isXPackTemplate (name) {
+  if (name.startsWith('.monitoring-')) {
+    return true
+  }
+  if (name.startsWith('.watch') || name.startsWith('.triggered_watches')) {
+    return true
+  }
+  if (name.startsWith('.data-frame-')) {
+    return true
+  }
+  if (name.startsWith('.ml-')) {
+    return true
+  }
+  if (name.startsWith('.transform-')) {
+    return true
+  }
+  if (name.startsWith('.deprecation-')) {
+    return true
+  }
+  switch (name) {
+    case '.watches':
+    case 'logstash-index-template':
+    case '.logstash-management':
+    case 'security_audit_log':
+    case '.slm-history':
+    case '.async-search':
+    case 'saml-service-provider':
+    case 'ilm-history':
+    case 'logs':
+    case 'logs-settings':
+    case 'logs-mappings':
+    case 'metrics':
+    case 'metrics-settings':
+    case 'metrics-mappings':
+    case 'synthetics':
+    case 'synthetics-settings':
+    case 'synthetics-mappings':
+    case '.snapshot-blob-cache':
+    case 'data-streams-mappings':
+      return true
+  }
+  return false
+}
+
+async function getSpec () {
+  const response = await fetch('https://raw.githubusercontent.com/elastic/elasticsearch-specification/main/output/schema/schema.json')
+  return await response.json()
+}
+
+let spec = null
+
+// some keys for the path used in the yaml tests are not supported in the client.
+// For example: snapshot.createRepository({ repository }) will not work.
+// This code changes the params to the appropriate name; in the example above,
+// "repository" will be renamed to "name".
+async function updateParams (cmd) {
+  if (spec == null) {
+    spec = await getSpec()
+  }
+  const endpoint = spec.endpoints.find(endpoint => endpoint.name === cmd.api)
+  assert(endpoint != null)
+  if (endpoint.request == null) return cmd
+
+  const type = spec.types.find(type => type.name.name === endpoint.request.name && type.name.namespace === endpoint.request.namespace)
+  assert(type != null)
+
+  const pathParams = type.path.reduce((acc, val) => {
+    if (val.codegenName != null) {
+      acc[val.name] = val.codegenName
+    }
+    return acc
+  }, {})
+
+  for (const key in cmd.params) {
+    if (pathParams[key] != null) {
+      cmd.params[pathParams[key]] = cmd.params[key]
+      delete cmd.params[key]
+    }
+  }
+
+  return cmd
+}
+
+module.exports = { runInParallel, delve, to, sleep, isXPackTemplate, updateParams }
diff --git a/test/integration/helpers/bulk.test.js b/test/integration/helpers/bulk.test.js
index 4bef30ccd..bffad53b1 100644
--- a/test/integration/helpers/bulk.test.js
+++ b/test/integration/helpers/bulk.test.js
@@ -1,11 +1,12 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ 'use strict' -const { createReadStream } = require('fs') -const { join } = require('path') +const { createReadStream } = require('node:fs') +const { join } = require('node:path') const split = require('split2') const { test, beforeEach, afterEach } = require('tap') const { waitCluster } = require('../../utils') @@ -87,9 +88,9 @@ test('bulk index with custom id', async t => { id: '19273860' // id of document n° 4242 }) - t.strictEqual(body._index, INDEX) - t.strictEqual(body._id, '19273860') - t.strictEqual(body._source.id, '19273860') + t.equal(body._index, INDEX) + t.equal(body._id, '19273860') + t.equal(body._source.id, '19273860') }) test('abort the operation on document drop', async t => { @@ -98,9 +99,9 @@ test('abort the operation on document drop', async t => { datasource: stream.pipe(split(JSON.parse)), concurrency: 1, onDrop (doc) { - t.strictEqual(doc.status, 400) - t.strictEqual(doc.error.type, 'mapper_parsing_exception') - t.strictEqual(doc.document.id, '45924372') + t.equal(doc.status, 400) + t.equal(doc.error.type, 'mapper_parsing_exception') + t.equal(doc.document.id, '45924372') b.abort() }, onDocument (doc) { @@ -120,7 +121,7 @@ test('abort the operation on document drop', async t => { const result = await b t.type(result.time, 'number') t.type(result.bytes, 'number') - t.strictEqual(result.total - 1, result.successful) + t.equal(result.total - 1, result.successful) t.match(result, { retry: 0, failed: 1, diff --git a/test/integration/helpers/msearch.test.js b/test/integration/helpers/msearch.test.js index e95be5745..479ddfec7 100644 --- a/test/integration/helpers/msearch.test.js +++ b/test/integration/helpers/msearch.test.js @@ -1,11 +1,12 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ 'use strict' -const { createReadStream } = require('fs') -const { join } = require('path') +const { createReadStream } = require('node:fs') +const { join } = require('node:path') const split = require('split2') const { test, beforeEach, afterEach } = require('tap') const { waitCluster } = require('../../utils') @@ -40,67 +41,67 @@ afterEach(async () => { test('Basic', t => { t.plan(4) - const s = client.helpers.msearch({ operations: 1 }) + const m = client.helpers.msearch({ operations: 1 }) - s.search( + m.search( { index: INDEX }, { query: { match: { title: 'javascript' } } }, (err, result) => { t.error(err) - t.strictEqual(result.body.hits.total.value, 106) + t.equal(result.body.hits.total.value, 106) } ) - s.search( + m.search( { index: INDEX }, { query: { match: { title: 'ruby' } } }, (err, result) => { t.error(err) - t.strictEqual(result.body.hits.total.value, 29) + t.equal(result.body.hits.total.value, 29) } ) - t.teardown(() => s.stop()) + t.teardown(() => m.stop()) }) test('Bad request', t => { t.plan(3) - const s = client.helpers.msearch({ operations: 1 }) + const m = client.helpers.msearch({ operations: 1 }) - s.search( + m.search( { index: INDEX }, { query: { match: { title: 'javascript' } } }, (err, result) => { t.error(err) - t.strictEqual(result.body.hits.total.value, 106) + t.equal(result.body.hits.total.value, 106) } ) - s.search( + m.search( { index: INDEX }, { query: { foo: { title: 'ruby' } } }, (err, result) => { - t.true(err instanceof errors.ResponseError) + t.ok(err instanceof errors.ResponseError) } ) - t.teardown(() => s.stop()) + t.teardown(() => m.stop()) }) test('Send multiple request concurrently over the concurrency limit', t => { t.plan(20) - const s = client.helpers.msearch({ operations: 1 }) + const m = client.helpers.msearch({ operations: 1 }) for (let i = 0; i < 10; i++) { - s.search( + m.search( { index: INDEX }, { query: { match: { title: 'javascript' } } }, (err, result) => { t.error(err) - t.strictEqual(result.body.hits.total.value, 106) + t.equal(result.body.hits.total.value, 106) } ) } - t.teardown(() => s.stop()) + t.teardown(() => m.stop()) }) diff --git a/test/integration/helpers/scroll.test.js b/test/integration/helpers/scroll.test.js index c1a5e3a64..6d5148a9e 100644 --- a/test/integration/helpers/scroll.test.js +++ b/test/integration/helpers/scroll.test.js @@ -1,11 +1,12 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ 'use strict' -const { createReadStream } = require('fs') -const { join } = require('path') +const { createReadStream } = require('node:fs') +const { join } = require('node:path') const split = require('split2') const { test, beforeEach, afterEach } = require('tap') const { waitCluster } = require('../../utils') @@ -50,14 +51,14 @@ test('search helper', async t => { } }) - var count = 0 + let count = 0 for await (const search of scrollSearch) { count += 1 for (const doc of search.documents) { - t.true(doc.title.toLowerCase().includes('javascript')) + t.ok(doc.title.toLowerCase().includes('javascript')) } } - t.strictEqual(count, 11) + t.equal(count, 11) }) test('clear a scroll search', async t => { @@ -72,14 +73,14 @@ test('clear a scroll search', async t => { } }) - var count = 0 + let count = 0 for await (const search of scrollSearch) { count += 1 if (count === 2) { search.clear() } } - t.strictEqual(count, 2) + t.equal(count, 2) }) test('scroll documents', async t => { @@ -94,10 +95,10 @@ test('scroll documents', async t => { } }) - var count = 0 + let count = 0 for await (const doc of scrollSearch) { count += 1 - t.true(doc.title.toLowerCase().includes('javascript')) + t.ok(doc.title.toLowerCase().includes('javascript')) } - t.strictEqual(count, 106) + t.equal(count, 106) }) diff --git a/test/integration/helpers/search.test.js b/test/integration/helpers/search.test.js index bb845eaa9..2f0512177 100644 --- a/test/integration/helpers/search.test.js +++ b/test/integration/helpers/search.test.js @@ -1,11 +1,12 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ 'use strict' -const { createReadStream } = require('fs') -const { join } = require('path') +const { createReadStream } = require('node:fs') +const { join } = require('node:path') const split = require('split2') const { test, beforeEach, afterEach } = require('tap') const { waitCluster } = require('../../utils') @@ -49,8 +50,8 @@ test('search helper', async t => { } } }) - t.strictEqual(results.length, 10) + t.equal(results.length, 10) for (const result of results) { - t.true(result.title.toLowerCase().includes('javascript')) + t.ok(result.title.toLowerCase().includes('javascript')) } }) diff --git a/test/integration/index.js b/test/integration/index.js index 1edc2834c..a4d51ea4e 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -1,434 +1,72 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ 'use strict' -const { writeFileSync, readFileSync, accessSync, mkdirSync, readdirSync, statSync } = require('fs') -const { join, sep } = require('path') -const yaml = require('js-yaml') -const Git = require('simple-git') -const ms = require('ms') -const { Client } = require('../../index') -const build = require('./test-runner') -const { sleep } = require('./helper') -const createJunitReporter = require('./reporter') +process.on('unhandledRejection', function (err) { + console.error(err) + process.exit(1) +}) -const esRepo = 'https://github.com/elastic/elasticsearch.git' -const esFolder = join(__dirname, '..', '..', 'elasticsearch') -const yamlFolder = join(esFolder, 'rest-api-spec', 'src', 'main', 'resources', 'rest-api-spec', 'test') -const xPackYamlFolder = join(esFolder, 'x-pack', 'plugin', 'src', 'test', 'resources', 'rest-api-spec', 'test') +const assert = require('node:assert') +const url = require('node:url') +const fs = require('node:fs') +const path = require('node:path') +const globby = require('globby') +const semver = require('semver') +const downloadArtifacts = require('../../scripts/download-artifacts') -const MAX_API_TIME = 1000 * 90 -const MAX_FILE_TIME = 1000 * 30 -const MAX_TEST_TIME = 1000 * 3 +const buildTests = require('./test-builder') -const ossSkips = { - // TODO: remove this once 'arbitrary_key' is implemented - // https://github.com/elastic/elasticsearch/pull/41492 - 'indices.split/30_copy_settings.yml': ['*'], - // skipping because we are booting ES with `discovery.type=single-node` - // and this test will fail because of this configuration - 'nodes.stats/30_discovery.yml': ['*'], - // the expected error is returning a 503, - // which triggers a retry and the node to be marked as dead - 'search.aggregation/240_max_buckets.yml': ['*'], - // the yaml runner assumes that null means "does not exists", - // while null is a valid json value, so the check will fail - 'search/320_disallow_queries.yml': ['Test disallow expensive queries'] -} -const xPackBlackList = { - // this two test cases are broken, we should - // return on those in the future. 
- 'analytics/top_metrics.yml': [ - 'sort by keyword field fails', - 'sort by string script fails' - ], - 'cat.aliases/10_basic.yml': ['Empty cluster'], - 'index/10_with_id.yml': ['Index with ID'], - 'indices.get_alias/10_basic.yml': ['Get alias against closed indices'], - 'indices.get_alias/20_empty.yml': ['Check empty aliases when getting all aliases via /_alias'], - // https://github.com/elastic/elasticsearch/pull/39400 - 'ml/jobs_crud.yml': ['Test put job with id that is already taken'], - // object keys must me strings, and `0.0.toString()` is `0` - 'ml/evaluate_data_frame.yml': [ - 'Test binary_soft_classifition precision', - 'Test binary_soft_classifition recall', - 'Test binary_soft_classifition confusion_matrix' - ], - // it gets random failures on CI, must investigate - 'ml/set_upgrade_mode.yml': [ - 'Attempt to open job when upgrade_mode is enabled', - 'Setting upgrade mode to disabled from enabled' - ], - // The cleanup fails with a index not found when retrieving the jobs - 'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'], - // investigate why this is failing - 'monitoring/bulk/10_basic.yml': ['*'], - 'monitoring/bulk/20_privileges.yml': ['*'], - 'license/20_put_license.yml': ['*'], - 'snapshot/10_basic.yml': ['*'], - // the body is correct, but the regex is failing - 'sql/sql.yml': ['Getting textual representation'], - // we are setting two certificates in the docker config - 'ssl/10_basic.yml': ['*'], - // very likely, the index template has not been loaded yet. - // we should run a indices.existsTemplate, but the name of the - // template may vary during time. - 'transforms_crud.yml': [ - 'Test basic transform crud', - 'Test transform with query and array of indices in source', - 'Test PUT continuous transform', - 'Test PUT continuous transform without delay set' - ], - 'transforms_force_delete.yml': [ - 'Test force deleting a running transform' - ], - 'transforms_cat_apis.yml': ['*'], - 'transforms_start_stop.yml': ['*'], - 'transforms_stats.yml': ['*'], - 'transforms_stats_continuous.yml': ['*'], - 'transforms_update.yml': ['*'], - // docker issue? 
- 'watcher/execute_watch/60_http_input.yml': ['*'], - // the checks are correct, but for some reason the test is failing on js side - // I bet is because the backslashes in the rg - 'watcher/execute_watch/70_invalid.yml': ['*'], - 'watcher/put_watch/10_basic.yml': ['*'], - 'xpack/15_basic.yml': ['*'] -} +const yamlFolder = downloadArtifacts.locations.testYamlFolder -function runner (opts = {}) { - const options = { node: opts.node } - if (opts.isXPack) { - options.ssl = { - ca: readFileSync(join(__dirname, '..', '..', '.ci', 'certs', 'ca.crt'), 'utf8'), - rejectUnauthorized: false +const getAllFiles = async dir => { + const files = await globby(dir, { + expandDirectories: { + extensions: ['yml', 'yaml'] } - } - const client = new Client(options) - log('Loading yaml suite') - start({ client, isXPack: opts.isXPack }) - .catch(console.log) -} - -async function waitCluster (client, times = 0) { - try { - await client.cluster.health({ waitForStatus: 'green', timeout: '50s' }) - } catch (err) { - if (++times < 10) { - await sleep(5000) - return waitCluster(client, times) - } - console.error(err) - process.exit(1) - } -} - -async function start ({ client, isXPack }) { - log('Waiting for Elasticsearch') - await waitCluster(client) - - const { body } = await client.info() - const { number: version, build_hash: sha } = body.version - - log(`Checking out sha ${sha}...`) - await withSHA(sha) - - log(`Testing ${isXPack ? 'XPack' : 'oss'} api...`) - const junit = createJunitReporter() - const junitTestSuites = junit.testsuites(`Integration test for ${isXPack ? 'XPack' : 'oss'} api`) - - const stats = { - total: 0, - skip: 0, - pass: 0, - assertions: 0 - } - const folders = getAllFiles(isXPack ? xPackYamlFolder : yamlFolder) - .filter(t => !/(README|TODO)/g.test(t)) - // we cluster the array based on the folder names, - // to provide a better test log output - .reduce((arr, file) => { - const path = file.slice(file.indexOf('/rest-api-spec/test'), file.lastIndexOf('/')) - var inserted = false - for (var i = 0; i < arr.length; i++) { - if (arr[i][0].includes(path)) { - inserted = true - arr[i].push(file) - break - } - } - if (!inserted) arr.push([file]) - return arr - }, []) - - const totalTime = now() - for (const folder of folders) { - // pretty name - const apiName = folder[0].slice( - folder[0].indexOf(`${sep}rest-api-spec${sep}test`) + 19, - folder[0].lastIndexOf(sep) - ) - - log('Testing ' + apiName.slice(1)) - const apiTime = now() - - for (const file of folder) { - const testRunner = build({ - client, - version, - isXPack: file.includes('x-pack') - }) - const fileTime = now() - const data = readFileSync(file, 'utf8') - // get the test yaml (as object), some file has multiple yaml documents inside, - // every document is separated by '---', so we split on the separator - // and then we remove the empty strings, finally we parse them - const tests = data - .split('\n---\n') - .map(s => s.trim()) - .filter(Boolean) - .map(parse) - - // get setup and teardown if present - var setupTest = null - var teardownTest = null - for (const test of tests) { - if (test.setup) setupTest = test.setup - if (test.teardown) teardownTest = test.teardown - } - - const cleanPath = file.slice(file.lastIndexOf(apiName)) - log(' ' + cleanPath) - const junitTestSuite = junitTestSuites.testsuite(apiName.slice(1) + ' - ' + cleanPath) - - for (const test of tests) { - const testTime = now() - const name = Object.keys(test)[0] - if (name === 'setup' || name === 'teardown') continue - const junitTestCase = 
junitTestSuite.testcase(name) - - stats.total += 1 - if (shouldSkip(isXPack, file, name)) { - stats.skip += 1 - junitTestCase.skip('This test is in the skip list of the client') - junitTestCase.end() - continue - } - log(' - ' + name) - try { - await testRunner.run(setupTest, test[name], teardownTest, stats, junitTestCase) - stats.pass += 1 - } catch (err) { - junitTestCase.failure(err) - junitTestCase.end() - junitTestSuite.end() - junitTestSuites.end() - generateJunitXmlReport(junit, isXPack ? 'xpack' : 'oss') - console.error(err) - process.exit(1) - } - const totalTestTime = now() - testTime - junitTestCase.end() - if (totalTestTime > MAX_TEST_TIME) { - log(' took too long: ' + ms(totalTestTime)) - } else { - log(' took: ' + ms(totalTestTime)) - } - } - junitTestSuite.end() - const totalFileTime = now() - fileTime - if (totalFileTime > MAX_FILE_TIME) { - log(` ${cleanPath} took too long: ` + ms(totalFileTime)) - } else { - log(` ${cleanPath} took: ` + ms(totalFileTime)) - } - } - const totalApiTime = now() - apiTime - if (totalApiTime > MAX_API_TIME) { - log(`${apiName} took too long: ` + ms(totalApiTime)) - } else { - log(`${apiName} took: ` + ms(totalApiTime)) - } - } - junitTestSuites.end() - generateJunitXmlReport(junit, isXPack ? 'xpack' : 'oss') - log(`Total testing time: ${ms(now() - totalTime)}`) - log(`Test stats: - - Total: ${stats.total} - - Skip: ${stats.skip} - - Pass: ${stats.pass} - - Assertions: ${stats.assertions} - `) -} - -function log (text) { - process.stdout.write(text + '\n') -} - -function now () { - var ts = process.hrtime() - return (ts[0] * 1e3) + (ts[1] / 1e6) -} - -function parse (data) { - try { - var doc = yaml.safeLoad(data) - } catch (err) { - console.error(err) - return - } - return doc -} - -/** - * Sets the elasticsearch repository to the given sha. - * If the repository is not present in `esFolder` it will - * clone the repository and the checkout the sha. - * If the repository is already present but it cannot checkout to - * the given sha, it will perform a pull and then try again. - * @param {string} sha - * @param {function} callback - */ -function withSHA (sha) { - return new Promise((resolve, reject) => { - _withSHA(err => err ? 
reject(err) : resolve()) }) - - function _withSHA (callback) { - var fresh = false - var retry = 0 - - if (!pathExist(esFolder)) { - if (!createFolder(esFolder)) { - return callback(new Error('Failed folder creation')) - } - fresh = true - } - - const git = Git(esFolder) - - if (fresh) { - clone(checkout) - } else { - checkout() - } - - function checkout () { - log(`Checking out sha '${sha}'`) - git.checkout(sha, err => { - if (err) { - if (retry++ > 0) { - return callback(err) - } - return pull(checkout) - } - callback() - }) - } - - function pull (cb) { - log('Pulling elasticsearch repository...') - git.pull(err => { - if (err) { - return callback(err) - } - cb() - }) - } - - function clone (cb) { - log('Cloning elasticsearch repository...') - git.clone(esRepo, esFolder, err => { - if (err) { - return callback(err) - } - cb() - }) - } - } + return files.sort() } -/** - * Checks if the given path exists - * @param {string} path - * @returns {boolean} true if exists, false if not - */ -function pathExist (path) { - try { - accessSync(path) - return true - } catch (err) { - return false - } -} - -/** - * Creates the given folder - * @param {string} name - * @returns {boolean} true on success, false on failure - */ -function createFolder (name) { - try { - mkdirSync(name) - return true - } catch (err) { - return false - } -} - -function generateJunitXmlReport (junit, suite) { - writeFileSync( - join(__dirname, '..', '..', `${suite}-report-junit.xml`), - junit.prettyPrint() - ) +async function doTestBuilder (version, clientOptions) { + await downloadArtifacts(undefined, version) + const files = await getAllFiles(yamlFolder) + await buildTests(files, clientOptions) } if (require.main === module) { - const node = process.env.TEST_ES_SERVER || 'http://localhost:9200/' - const opts = { - node, - isXPack: node.indexOf('@') > -1 + const node = process.env.TEST_ES_SERVER + const apiKey = process.env.ES_API_SECRET_KEY + const password = process.env.ELASTIC_PASSWORD + let version = process.env.STACK_VERSION + + assert(node != null, 'Environment variable missing: TEST_ES_SERVER') + assert(apiKey != null || password != null, 'Environment variable missing: ES_API_SECRET_KEY or ELASTIC_PASSWORD') + assert(version != null, 'Environment variable missing: STACK_VERSION') + + version = semver.clean(version.includes('SNAPSHOT') ? 
version.split('-')[0] : version) + + const clientOptions = { node } + if (apiKey != null) { + clientOptions.auth = { apiKey } + } else { + clientOptions.auth = { username: 'elastic', password } } - runner(opts) -} - -const shouldSkip = (isXPack, file, name) => { - var list = Object.keys(ossSkips) - for (var i = 0; i < list.length; i++) { - const ossTest = ossSkips[list[i]] - for (var j = 0; j < ossTest.length; j++) { - if (file.endsWith(list[i]) && (name === ossTest[j] || ossTest[j] === '*')) { - const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because is blacklisted in the oss test`) - return true - } - } - } - - if (file.includes('x-pack') || isXPack) { - list = Object.keys(xPackBlackList) - for (i = 0; i < list.length; i++) { - const platTest = xPackBlackList[list[i]] - for (j = 0; j < platTest.length; j++) { - if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) { - const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because is blacklisted in the XPack test`) - return true - } - } + const nodeUrl = new url.URL(node) + if (nodeUrl.protocol === 'https:') { + clientOptions.tls = { + ca: fs.readFileSync(path.join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'), + rejectUnauthorized: false } } - return false + doTestBuilder(version, clientOptions) + .then(() => process.exit(0)) + .catch(err => { + console.error(err) + process.exit(1) + }) } - -const getAllFiles = dir => - readdirSync(dir).reduce((files, file) => { - const name = join(dir, file) - const isDirectory = statSync(name).isDirectory() - return isDirectory ? [...files, ...getAllFiles(name)] : [...files, name] - }, []) - -module.exports = runner diff --git a/test/integration/reporter.js b/test/integration/reporter.js deleted file mode 100644 index 0d3621de7..000000000 --- a/test/integration/reporter.js +++ /dev/null @@ -1,109 +0,0 @@ -'use strict' - -const assert = require('assert') -const { create } = require('xmlbuilder2') - -function createJunitReporter () { - const report = {} - - return { testsuites, prettyPrint } - - function prettyPrint () { - return create(report).end({ prettyPrint: true }) - } - - function testsuites (name) { - assert(name, 'The testsuites name is required') - assert(report.testsuites === undefined, 'Cannot set more than one testsuites block') - const startTime = Date.now() - - report.testsuites = { - '@id': new Date().toISOString(), - '@name': name - } - - const testsuiteList = [] - - return { - testsuite: createTestSuite(testsuiteList), - end () { - report.testsuites['@time'] = Math.round((Date.now() - startTime) / 1000) - report.testsuites['@tests'] = testsuiteList.reduce((acc, val) => { - acc += val['@tests'] - return acc - }, 0) - report.testsuites['@failures'] = testsuiteList.reduce((acc, val) => { - acc += val['@failures'] - return acc - }, 0) - report.testsuites['@skipped'] = testsuiteList.reduce((acc, val) => { - acc += val['@skipped'] - return acc - }, 0) - if (testsuiteList.length) { - report.testsuites.testsuite = testsuiteList - } - } - } - } - - function createTestSuite (testsuiteList) { - return function testsuite (name) { - assert(name, 'The testsuite name is required') - const startTime = Date.now() - const suite = { - '@id': new Date().toISOString(), - '@name': name - } - const testcaseList = [] - testsuiteList.push(suite) - return { - testcase: createTestCase(testcaseList), - end () { - suite['@time'] = 
Math.round((Date.now() - startTime) / 1000) - suite['@tests'] = testcaseList.length - suite['@failures'] = testcaseList.filter(t => t.failure).length - suite['@skipped'] = testcaseList.filter(t => t.skipped).length - if (testcaseList.length) { - suite.testcase = testcaseList - } - } - } - } - } - - function createTestCase (testcaseList) { - return function testcase (name) { - assert(name, 'The testcase name is required') - const startTime = Date.now() - const tcase = { - '@id': new Date().toISOString(), - '@name': name - } - testcaseList.push(tcase) - return { - failure (error) { - assert(error, 'The failure error object is required') - tcase.failure = { - '#': error.stack, - '@message': error.message, - '@type': error.code - } - }, - skip (reason) { - if (typeof reason !== 'string') { - reason = JSON.stringify(reason, null, 2) - } - tcase.skipped = { - '#': reason - } - }, - end () { - tcase['@time'] = Math.round((Date.now() - startTime) / 1000) - } - } - } - } -} - -module.exports = createJunitReporter diff --git a/test/integration/test-builder.js b/test/integration/test-builder.js new file mode 100644 index 000000000..88c8b39b8 --- /dev/null +++ b/test/integration/test-builder.js @@ -0,0 +1,484 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const { join, sep } = require('node:path') +const { readFileSync, writeFileSync, promises } = require('node:fs') +const yaml = require('js-yaml') +const { rimraf } = require('rimraf') +const { mkdir } = promises + +const generatedTestsPath = join(__dirname, '..', '..', 'generated-tests') + +const stackSkips = [ + // test definition bug: response is empty string + 'cat/fielddata.yml', + // test definition bug: response is empty string + 'cluster/delete_voting_config_exclusions.yml', + // test definition bug: response is empty string + 'cluster/voting_config_exclusions.yml', + // client bug: ILM request takes a "body" param, but "body" is a special keyword in the JS client + 'ilm/10_basic.yml', + // health report is... not healthy + 'health_report.yml', + // TODO: `contains` action only supports checking for primitives inside arrays or strings inside strings, not referenced values like objects inside arrays + 'entsearch/10_basic.yml', + // test definition bug: error message does not match + 'entsearch/30_sync_jobs_stack.yml', + // no handler found for uri [/knn_test/_knn_search] + 'knn_search.yml', + // TODO: fix license on ES startup - "Operation failed: Current license is basic." + 'license/10_stack.yml', + // response.body should be truthy. found: "" + 'logstash/10_basic.yml', + // test definition bug? security_exception: unable to authenticate user [x_pack_rest_user] for REST request [/_ml/trained_models/test_model/definition/0] + 'machine_learning/clear_tm_deployment_cache.yml', + // client bug: 0.99995 does not equal 0.5 + 'machine_learning/data_frame_evaluate.yml', + // test definition bug? 
regex has whitespace, maybe needs to be removed + 'machine_learning/explain_data_frame_analytics.yml', + // client bug: 4 != 227 + 'machine_learning/preview_datafeed.yml', + // test definition bug: error message does not match + 'machine_learning/revert_model_snapshot.yml', + // test definition bug: error message does not match + 'machine_learning/update_model_snapshot.yml', + // version_conflict_engine_exception + 'machine_learning/jobs_crud.yml', + // test definition bug: error message does not match + 'machine_learning/model_snapshots.yml', + // test definition bug: error message does not match + 'query_rules/30_test.yml', + // client bug: 0 != 0.1 + 'script/10_basic.yml', + // client bug: request takes a "body" param, but "body" is a special keyword in the JS client + 'searchable_snapshots/10_basic.yml', + // test builder bug: does `match` action need to support "array contains value"? + 'security/10_api_key_basic.yml', + // test definition bug: error message does not match + 'security/140_user.yml', + // test definition bug: error message does not match + 'security/30_privileges_stack.yml', + // unknown issue: $profile.enabled path doesn't exist in response + 'security/130_user_profile.yml', + // test definition bug: error message does not match + 'security/change_password.yml', + // test builder bug: media_type_header_exception + 'simulate/ingest.yml', + // client bug: request takes a "body" param, but "body" is a special keyword in the JS client + 'snapshot/10_basic.yml', + // test definition bug: illegal_argument_exception + 'sql/10_basic.yml', + // test definition bug: illegal_argument_exception + 'text_structure/10_basic.yml', + // test definition bug: illegal_argument_exception + 'transform/10_basic.yml', + // attempts to retrieve index.routing.allocation.include, which does not exist + 'watcher/10_basic.yml' +] + +const serverlessSkips = [ + // TODO: sql.getAsync does not set a content-type header but ES expects one + // transport only sets a content-type if the body is not empty + 'sql/10_basic.yml', + // TODO: bulk call in setup fails due to "malformed action/metadata line" + // bulk body is being sent as a Buffer, unsure if related. 
+ 'transform/10_basic.yml', + // TODO: scripts_painless_execute expects {"result":"0.1"}, gets {"result":"0"} + // body sent as Buffer, unsure if related + 'script/10_basic.yml', + // TODO: expects {"outlier_detection.auc_roc.value":0.99995}, gets {"outlier_detection.auc_roc.value":0.5} + // remove if/when https://github.com/elastic/elasticsearch-clients-tests/issues/37 is resolved + 'machine_learning/data_frame_evaluate.yml', + // TODO: Cannot perform requested action because job [job-crud-test-apis] is not open + 'machine_learning/jobs_crud.yml', + // TODO: test runner needs to support ignoring 410 errors + 'enrich/10_basic.yml', + // TODO: parameter `enabled` is not allowed in source + // Same underlying problem as https://github.com/elastic/elasticsearch-clients-tests/issues/55 + 'cluster/component_templates.yml', + // TODO: expecting `ct_field` field mapping to be returned, but instead only finds `field` + 'indices/simulate_template.yml', + 'indices/simulate_index_template.yml', + // TODO: test currently times out + 'inference/10_basic.yml', + // TODO: Fix: "Trained model deployment [test_model] is not allocated to any nodes" + 'machine_learning/20_trained_model_serverless.yml', + // TODO: query_rules api not available yet + 'query_rules/10_query_rules.yml', + 'query_rules/20_rulesets.yml', + 'query_rules/30_test.yml', + // TODO: security.putRole API not available + 'security/50_roles_serverless.yml', + // TODO: expected undefined to equal 'some_table' + 'entsearch/50_connector_updates.yml', + // TODO: resource_not_found_exception + 'tasks_serverless.yml', +] + +function parse (data) { + let doc + try { + doc = yaml.load(data, { schema: yaml.CORE_SCHEMA }) + } catch (err) { + console.error(err) + return + } + return doc +} + +async function build (yamlFiles, clientOptions) { + await rimraf(generatedTestsPath) + await mkdir(generatedTestsPath, { recursive: true }) + + for (const file of yamlFiles) { + const apiName = file.split(`${sep}tests${sep}`)[1] + const data = readFileSync(file, 'utf8') + + const tests = data + .split('\n---\n') + .map(s => s.trim()) + // empty strings + .filter(Boolean) + .map(parse) + // null values + .filter(Boolean) + + let code = "import { test } from 'tap'\n" + code += "import { Client } from '@elastic/elasticsearch'\n\n" + + const requires = tests.find(test => test.requires != null) + let skip = new Set() + if (requires != null) { + const { serverless = true, stack = true } = requires.requires + if (!serverless) skip.add('process.env.TEST_ES_SERVERLESS === "1"') + if (!stack) skip.add('process.env.TEST_ES_STACK === "1"') + } + + if (stackSkips.includes(apiName)) skip.add('process.env.TEST_ES_STACK === "1"') + if (serverlessSkips.includes(apiName)) skip.add('process.env.TEST_ES_SERVERLESS === "1"') + + if (skip.size > 0) { + code += `test('${apiName}', { skip: ${Array.from(skip).join(' || ')} }, t => {\n` + } else { + code += `test('${apiName}', t => {\n` + } + + for (const test of tests) { + if (test.setup != null) { + code += ' t.before(async () => {\n' + code += indent(buildActions(test.setup), 4) + code += ' })\n\n' + } + + if (test.teardown != null) { + code += ' t.after(async () => {\n' + code += indent(buildActions(test.teardown), 4) + code += ' })\n\n' + } + + for (const key of Object.keys(test).filter(k => !['setup', 'teardown', 'requires'].includes(k))) { + if (test[key].find(action => Object.keys(action)[0] === 'skip') != null) { + code += ` t.test('${key}', { skip: true }, async t => {\n` + } else { + code += ` t.test('${key}', async t => {\n` + 
}
+      code += indent(buildActions(test[key]), 4)
+      code += '\n t.end()\n'
+      code += ' })\n'
+    }
+    // if (test.requires != null) requires = test.requires
+  }
+
+  code += '\n t.end()\n'
+  code += '})\n'
+
+  const testDir = join(generatedTestsPath, apiName.split(sep).slice(0, -1).join(sep))
+  const testFile = join(testDir, apiName.split(sep).pop().replace(/\.ya?ml$/, '.mjs'))
+  await mkdir(testDir, { recursive: true })
+  writeFileSync(testFile, code, 'utf8')
+  }
+
+  function buildActions (actions) {
+    let code = `const client = new Client(${JSON.stringify(clientOptions, null, 2)})\n`
+    code += 'let response\n\n'
+
+    const vars = new Set()
+
+    for (const action of actions) {
+      const key = Object.keys(action)[0]
+      switch (key) {
+        case 'do':
+          code += buildDo(action.do)
+          break
+        case 'set':
+          const setResult = buildSet(action.set, vars)
+          vars.add(setResult.varName)
+          code += setResult.code
+          break
+        case 'transform_and_set':
+          code += buildTransformAndSet(action.transform_and_set)
+          break
+        case 'match':
+          code += buildMatch(action.match)
+          break
+        case 'lt':
+          code += buildLt(action.lt)
+          break
+        case 'lte':
+          code += buildLte(action.lte)
+          break
+        case 'gt':
+          code += buildGt(action.gt)
+          break
+        case 'gte':
+          code += buildGte(action.gte)
+          break
+        case 'length':
+          code += buildLength(action.length)
+          break
+        case 'is_true':
+          code += buildIsTrue(action.is_true)
+          break
+        case 'is_false':
+          code += buildIsFalse(action.is_false)
+          break
+        case 'contains':
+          code += buildContains(action.contains)
+          break
+        case 'exists':
+          code += buildExists(action.exists)
+          break
+        case 'skip':
+          break
+        default:
+          console.warn(`Action not supported: ${key}`)
+          break
+      }
+    }
+    return code
+  }
+}
+
+function buildDo (action) {
+  let code = ''
+  const keys = Object.keys(action)
+  if (keys.includes('catch')) {
+    code += 'try {\n'
+    code += indent(buildRequest(action), 2)
+    code += '} catch (err) {\n'
+    code += ` t.match(err.toString(), ${buildValLiteral(action.catch)})\n`
+    code += '}\n'
+  } else {
+    code += buildRequest(action)
+  }
+  return code
+}
+
+function buildRequest (action) {
+  let code = ''
+
+  const options = { meta: true }
+
+  for (const key of Object.keys(action)) {
+    if (key === 'catch') continue
+
+    if (key === 'headers') {
+      options.headers = action.headers
+      continue
+    }
+
+    const params = action[key]
+    if (params.ignore != null) {
+      if (Array.isArray(params.ignore)) {
+        options.ignore = params.ignore
+      } else {
+        options.ignore = [params.ignore]
+      }
+    }
+
+    code += `response = await client.${toCamelCase(key)}(${buildApiParams(action[key])}, ${JSON.stringify(options)})\n`
+  }
+  return code
+}
+
+function buildSet (action, vars) {
+  const key = Object.keys(action)[0]
+  const varName = action[key]
+  const lookup = buildLookup(key)
+
+  let code = ''
+  if (vars.has(varName)) {
+    code = `${varName} = ${lookup}\n`
+  } else {
+    code = `let ${varName} = ${lookup}\n`
+  }
+  return { code, varName }
+}
+
+function buildTransformAndSet (action) {
+  return `// TODO buildTransformAndSet: ${JSON.stringify(action)}\n`
+}
+
+function buildMatch (action) {
+  const key = Object.keys(action)[0]
+  let lookup = buildLookup(key)
+  const val = buildValLiteral(action[key])
+  return `t.match(${lookup}, ${val})\n`
+}
+
+function buildLt (action) {
+  const key = Object.keys(action)[0]
+  const lookup = buildLookup(key)
+  const val = buildValLiteral(action[key])
+  return `t.ok(${lookup} < ${val})\n`
+}
+
+function buildLte (action) {
+  const key = Object.keys(action)[0]
+  const lookup = buildLookup(key)
+  const val = buildValLiteral(action[key])
+  return `t.ok(${lookup} <= ${val})\n`
+}
+
+function buildGt (action) {
+  const key = Object.keys(action)[0]
+  const lookup = buildLookup(key)
+  const val = buildValLiteral(action[key])
+  return `t.ok(${lookup} > ${val})\n`
+}
+
+function buildGte (action) {
+  const key = Object.keys(action)[0]
+  const lookup = buildLookup(key)
+  const val = buildValLiteral(action[key])
+  return `t.ok(${lookup} >= ${val})\n`
+}
+
+function buildLength (action) {
+  const key = Object.keys(action)[0]
+  const lookup = buildLookup(key)
+  const val = buildValLiteral(action[key])
+
+  let code = ''
+  code += `if (typeof ${lookup} === 'object' && !Array.isArray(${lookup})) {\n`
+  code += ` t.equal(Object.keys(${lookup}).length, ${val})\n`
+  code += `} else {\n`
+  code += ` t.equal(${lookup}.length, ${val})\n`
+  code += `}\n`
+  return code
+}
+
+function buildIsTrue (action) {
+  let lookup = `${buildLookup(action)}`
+  let errMessage = `\`${action} should be truthy. found: '\$\{JSON.stringify(${lookup})\}'\``
+  if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be truthy. found: '\$\{${lookup}\}'\``
+  return `t.ok(${lookup} === "true" || (Boolean(${lookup}) && ${lookup} !== "false"), ${errMessage})\n`
+}
+
+function buildIsFalse (action) {
+  let lookup = `${buildLookup(action)}`
+  let errMessage = `\`${action} should be falsy. found: '\$\{JSON.stringify(${lookup})\}'\``
+  if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be falsy. found: '\$\{${lookup}\}'\``
+  return `t.ok(${lookup} === "false" || !Boolean(${lookup}), ${errMessage})\n`
+}
+
+function buildContains (action) {
+  const key = Object.keys(action)[0]
+  const lookup = buildLookup(key)
+  const val = buildValLiteral(action[key])
+  return `t.ok(${lookup}.includes(${val}), '${JSON.stringify(val)} not found in ${key}')\n`
+}
+
+function buildExists (keyName) {
+  const lookup = buildLookup(keyName)
+  return `t.ok(${lookup} != null, \`Key "${keyName}" not found in response body: \$\{JSON.stringify(response.body, null, 2)\}\`)\n`
+}
+
+function buildApiParams (params) {
+  if (Object.keys(params).length === 0) {
+    return 'undefined'
+  } else {
+    const out = {}
+    Object.keys(params).filter(k => k !== 'ignore' && k !== 'headers').forEach(k => out[k] = params[k])
+    return buildValLiteral(out)
+  }
+}
+
+function toCamelCase (name) {
+  return name.replace(/_([a-z])/g, g => g[1].toUpperCase())
+}
+
+function indent (str, spaces) {
+  const tabs = ' '.repeat(spaces)
+  return str.replace(/\s+$/, '').split('\n').map(l => `${tabs}${l}`).join('\n') + '\n'
+}
+
+function buildLookup (path) {
+  if (path === '$body') return '(typeof response.body === "string" ? response.body : JSON.stringify(response.body))'
+
+  const outPath = path.split('.').map(step => {
+    if (parseInt(step, 10).toString() === step) {
+      return `[${step}]`
+    } else if (step.match(/^\$[a-zA-Z0-9_]+$/)) {
+      const lookup = step.replace(/^\$/, '')
+      if (lookup === 'body') return ''
+      return `[${lookup}]`
+    } else if (step === '') {
+      return ''
+    } else {
+      return `['${step}']`
+    }
+  }).join('')
+  return `response.body${outPath}`
+}
+
+function buildValLiteral (val) {
+  if (typeof val === 'string') val = val.trim()
+  if (isRegExp(val)) {
+    return JSON.stringify(val).replace(/^"/, '').replace(/"$/, '').replaceAll('\\\\', '\\')
+  } else if (isVariable(val)) {
+    if (val === '$body') return 'JSON.stringify(response.body)'
+    return val.replace(/^\$/, '')
+  } else if (isPlainObject(val)) {
+    return JSON.stringify(cleanObject(val), null, 2).replace(/"\$([a-zA-Z0-9_]+)"/g, '$1')
+  } else {
+    return JSON.stringify(val)
+  }
+}
+
+function isRegExp (str) {
+  return typeof str === 'string' && str.startsWith('/') && str.endsWith('/')
+}
+
+function isVariable (str) {
+  return typeof str === 'string' && str.match(/^\$[a-zA-Z0-9_]+$/) != null
+}
+
+function cleanObject (obj) {
+  Object.keys(obj).forEach(key => {
+    let val = obj[key]
+    if (typeof val === 'string' && val.trim().startsWith('{') && val.trim().endsWith('}')) {
+      // attempt to parse as object
+      try {
+        val = JSON.parse(val)
+      } catch {
+      }
+    } else if (isPlainObject(val)) {
+      val = cleanObject(val)
+    } else if (Array.isArray(val)) {
+      val = val.map(item => isPlainObject(item) ? cleanObject(item) : item)
+    }
+    obj[key] = val
+  })
+  return obj
+}
+
+function isPlainObject (obj) {
+  return typeof obj === 'object' && !Array.isArray(obj) && obj != null
+}
+
+module.exports = build
diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js
deleted file mode 100644
index acfd29143..000000000
--- a/test/integration/test-runner.js
+++ /dev/null
@@ -1,902 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
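// ---------------------------------------------------------------------------
// Editor's illustration (not part of the patch): how the generator above turns
// the dotted paths used in the YAML tests into the lookups embedded in the
// generated .mjs files. This is a minimal standalone re-implementation of
// buildLookup()'s numeric-index/object-key handling, leaving out the `$var`
// stash cases, just to show the mapping:

function toLookupSketch (path) {
  const out = path.split('.').map(step => {
    // a step that round-trips through parseInt is an array index
    if (parseInt(step, 10).toString() === step) return `[${step}]`
    return `['${step}']`
  }).join('')
  return `response.body${out}`
}

console.log(toLookupSketch('hits.hits.0._source.foo'))
// => response.body['hits']['hits'][0]['_source']['foo']
// So a YAML action `match: { 'hits.hits.0._source.foo': 'bar' }` comes out of
// buildMatch() as: t.match(response.body['hits']['hits'][0]['_source']['foo'], "bar")
// ---------------------------------------------------------------------------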
-// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint camelcase: 0 */ - -const assert = require('assert') -const semver = require('semver') -const helper = require('./helper') -const deepEqual = require('fast-deep-equal') -const { ConfigurationError } = require('../../lib/errors') - -const { delve, to } = helper - -const supportedFeatures = [ - 'gtelte', - 'regex', - 'benchmark', - 'stash_in_path', - 'groovy_scripting', - 'headers', - 'transform_and_set', - 'catch_unauthorized', - 'arbitrary_key' -] - -function build (opts = {}) { - const client = opts.client - const esVersion = opts.version - const isXPack = opts.isXPack - const stash = new Map() - let response = null - - /** - * Runs a cleanup, removes all indices, aliases, templates, and snapshots - * @returns {Promise} - */ - async function cleanup () { - response = null - stash.clear() - - try { - await client.indices.deleteAlias({ - index: '_all', - name: '_all' - }, { ignore: 404 }) - } catch (err) { - assert.ifError(err, 'should not error: indices.deleteAlias') - } - - try { - await client.indices.delete({ - index: '_all', - expand_wildcards: 'open,closed,hidden' - }, { ignore: 404 }) - } catch (err) { - assert.ifError(err, 'should not error: indices.delete') - } - - try { - const { body: templates } = await client.indices.getTemplate() - await helper.runInParallel( - client, 'indices.deleteTemplate', - Object.keys(templates).map(t => ({ name: t })) - ) - } catch (err) { - assert.ifError(err, 'should not error: indices.deleteTemplate') - } - - try { - const { body: repositories } = await client.snapshot.getRepository() - for (const repository of Object.keys(repositories)) { - const { body: snapshots } = await client.snapshot.get({ repository, snapshot: '_all' }) - await helper.runInParallel( - client, 'snapshot.delete', - Object.keys(snapshots).map(snapshot => ({ snapshot, repository })), - { ignore: [404] } - ) - await client.snapshot.deleteRepository({ repository }, { ignore: [404] }) - } - } catch (err) { - assert.ifError(err, 'should not error: snapshot.delete / snapshot.deleteRepository') - } - } - - /** - * Runs some additional API calls to prepare ES for the xpack test, - * This set of calls should be executed before the final clenup. 
- * @returns {Promise} - */ - async function cleanupXPack () { - // tap.comment('XPack Cleanup') - - try { - const { body } = await client.security.getRole() - const roles = Object.keys(body).filter(n => !body[n].metadata._reserved) - await helper.runInParallel( - client, 'security.deleteRole', - roles.map(r => ({ name: r })) - ) - } catch (err) { - assert.ifError(err, 'should not error: security role cleanup') - } - - try { - const { body } = await client.security.getUser() - const users = Object.keys(body).filter(n => !body[n].metadata._reserved) - await helper.runInParallel( - client, 'security.deleteUser', - users.map(r => ({ username: r })) - ) - } catch (err) { - assert.ifError(err, 'should not error: security user cleanup') - } - - try { - const { body } = await client.security.getPrivileges() - const privileges = [] - Object.keys(body).forEach(app => { - Object.keys(body[app]).forEach(priv => { - privileges.push({ - name: body[app][priv].name, - application: body[app][priv].application - }) - }) - }) - await helper.runInParallel(client, 'security.deletePrivileges', privileges) - } catch (err) { - assert.ifError(err, 'should not error: security privileges cleanup') - } - - try { - await client.ml.stopDatafeed({ datafeedId: '*', force: true }) - const { body } = await client.ml.getDatafeeds({ datafeedId: '*' }) - const feeds = body.datafeeds.map(f => f.datafeed_id) - await helper.runInParallel( - client, 'ml.deleteDatafeed', - feeds.map(f => ({ datafeedId: f })) - ) - } catch (err) { - assert.ifError(err, 'should error: not ml datafeed cleanup') - } - - try { - await client.ml.closeJob({ jobId: '*', force: true }) - const { body } = await client.ml.getJobs({ jobId: '*' }) - const jobs = body.jobs.map(j => j.job_id) - await helper.runInParallel( - client, 'ml.deleteJob', - jobs.map(j => ({ jobId: j, waitForCompletion: true, force: true })) - ) - } catch (err) { - assert.ifError(err, 'should not error: ml job cleanup') - } - - try { - const { body } = await client.rollup.getJobs({ id: '_all' }) - const jobs = body.jobs.map(j => j.config.id) - await helper.runInParallel( - client, 'rollup.stopJob', - jobs.map(j => ({ id: j, waitForCompletion: true })) - ) - await helper.runInParallel( - client, 'rollup.deleteJob', - jobs.map(j => ({ id: j })) - ) - } catch (err) { - assert.ifError(err, 'should not error: rollup jobs cleanup') - } - - try { - const { body } = await client.tasks.list() - const tasks = Object.keys(body.nodes) - .reduce((acc, node) => { - const { tasks } = body.nodes[node] - Object.keys(tasks).forEach(id => { - if (tasks[id].cancellable) acc.push(id) - }) - return acc - }, []) - - await helper.runInParallel( - client, 'tasks.cancel', - tasks.map(id => ({ taskId: id })) - ) - } catch (err) { - assert.ifError(err, 'should not error: tasks cleanup') - } - - try { - await client.ilm.removePolicy({ index: '_all' }) - } catch (err) { - assert.ifError(err, 'should not error: ilm.removePolicy') - } - - // refresh the all indexes - try { - await client.indices.refresh({ index: '_all' }) - } catch (err) { - assert.ifError(err, 'should not error: indices.refresh') - } - } - - /** - * Runs the given test. 
- * It runs the test components in the following order: - * - skip check - * - xpack user - * - setup - * - the actual test - * - teardown - * - xpack cleanup - * - cleanup - * @param {object} setup (null if not needed) - * @param {object} test - * @oaram {object} teardown (null if not needed) - * @returns {Promise} - */ - async function run (setup, test, teardown, stats, junit) { - // if we should skip a feature in the setup/teardown section - // we should skip the entire test file - const skip = getSkip(setup) || getSkip(teardown) - if (skip && shouldSkip(esVersion, skip)) { - junit.skip(skip) - logSkip(skip) - return - } - - if (isXPack) { - // Some xpack test requires this user - // tap.comment('Creating x-pack user') - try { - await client.security.putUser({ - username: 'x_pack_rest_user', - body: { password: 'x-pack-test-password', roles: ['superuser'] } - }) - } catch (err) { - assert.ifError(err, 'should not error: security.putUser') - } - } - - if (setup) await exec('Setup', setup, stats, junit) - - await exec('Test', test, stats, junit) - - if (teardown) await exec('Teardown', teardown, stats, junit) - - if (isXPack) await cleanupXPack() - - await cleanup() - } - - /** - * Fill the stashed values of a command - * let's say the we have stashed the `master` value, - * is_true: nodes.$master.transport.profiles - * becomes - * is_true: nodes.new_value.transport.profiles - * @param {object|string} the action to update - * @returns {object|string} the updated action - */ - function fillStashedValues (obj) { - if (typeof obj === 'string') { - return getStashedValues(obj) - } - // iterate every key of the object - for (const key in obj) { - const val = obj[key] - // if the key value is a string, and the string includes '${' - // that we must update the content of '${...}'. - // eg: 'Basic ${auth}' we search the stahed value 'auth' - // and the resulting value will be 'Basic valueOfAuth' - if (typeof val === 'string' && val.includes('${')) { - const start = val.indexOf('${') - const end = val.indexOf('}', val.indexOf('${')) - const stashedKey = val.slice(start + 2, end) - const stashed = stash.get(stashedKey) - obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) - continue - } - // handle json strings, eg: '{"hello":"$world"}' - if (typeof val === 'string' && val.includes('"$')) { - const start = val.indexOf('"$') - const end = val.indexOf('"', start + 1) - const stashedKey = val.slice(start + 2, end) - const stashed = '"' + stash.get(stashedKey) + '"' - obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) - continue - } - // if the key value is a string, and the string includes '$' - // we run the "update value" code - if (typeof val === 'string' && val.includes('$')) { - // update the key value - obj[key] = getStashedValues(val) - continue - } - - // go deep in the object - if (val !== null && typeof val === 'object') { - fillStashedValues(val) - } - } - - return obj - - function getStashedValues (str) { - const arr = str - // we split the string on the dots - // handle the key with a dot inside that is not a part of the path - .split(/(? { - if (part[0] === '$') { - const stashed = stash.get(part.slice(1)) - if (stashed == null) { - throw new Error(`Cannot find stashed value '${part}' for '${JSON.stringify(obj)}'`) - } - return stashed - } - return part - }) - - // recreate the string value only if the array length is higher than one - // otherwise return the first element which in some test this could be a number, - // and call `.join` will coerce it to a string. 
- return arr.length > 1 ? arr.join('.') : arr[0] - } - } - - /** - * Stashes a value - * @param {string} the key to search in the previous response - * @param {string} the name to identify the stashed value - * @returns {TestRunner} - */ - function set (key, name) { - if (key.includes('_arbitrary_key_')) { - var currentVisit = null - for (const path of key.split('.')) { - if (path === '_arbitrary_key_') { - const keys = Object.keys(currentVisit) - const arbitraryKey = keys[getRandomInt(0, keys.length)] - stash.set(name, arbitraryKey) - } else { - currentVisit = delve(response, path) - } - } - } else { - stash.set(name, delve(response, key)) - } - } - - /** - * Applies a given transformation and stashes the result. - * @param {string} the name to identify the stashed value - * @param {string} the transformation function as string - * @returns {TestRunner} - */ - function transform_and_set (name, transform) { - if (/base64EncodeCredentials/.test(transform)) { - const [user, password] = transform - .slice(transform.indexOf('(') + 1, -1) - .replace(/ /g, '') - .split(',') - const userAndPassword = `${delve(response, user)}:${delve(response, password)}` - stash.set(name, Buffer.from(userAndPassword).toString('base64')) - } else { - throw new Error(`Unknown transform: '${transform}'`) - } - } - - /** - * Runs a client command - * @param {object} the action to perform - * @returns {Promise} - */ - async function doAction (action, stats) { - const cmd = parseDo(action) - try { - var api = delve(client, cmd.method).bind(client) - } catch (err) { - console.error(`\nError: Cannot find the method '${cmd.method}' in the client.\n`) - process.exit(1) - } - - const options = { ignore: cmd.params.ignore, headers: action.headers } - if (cmd.params.ignore) delete cmd.params.ignore - - const [err, result] = await to(api(cmd.params, options)) - var warnings = result ? result.warnings : null - var body = result ? 
result.body : null - - if (action.warnings && warnings === null) { - assert.fail('We should get a warning header', action.warnings) - } else if (!action.warnings && warnings !== null) { - // if there is only the 'default shard will change' - // warning we skip the check, because the yaml - // spec may not be updated - let hasDefaultShardsWarning = false - warnings.forEach(h => { - if (/default\snumber\sof\sshards/g.test(h)) { - hasDefaultShardsWarning = true - } - }) - - if (hasDefaultShardsWarning === true && warnings.length > 1) { - assert.fail('We are not expecting warnings', warnings) - } - } else if (action.warnings && warnings !== null) { - // if the yaml warnings do not contain the - // 'default shard will change' warning - // we do not check it presence in the warnings array - // because the yaml spec may not be updated - let hasDefaultShardsWarning = false - action.warnings.forEach(h => { - if (/default\snumber\sof\sshards/g.test(h)) { - hasDefaultShardsWarning = true - } - }) - - if (hasDefaultShardsWarning === false) { - warnings = warnings.filter(h => !h.test(/default\snumber\sof\sshards/g)) - } - - stats.assertions += 1 - assert.ok(deepEqual(warnings, action.warnings)) - } - - if (action.catch) { - stats.assertions += 1 - assert.ok( - parseDoError(err, action.catch), - `the error should be: ${action.catch}` - ) - try { - response = JSON.parse(err.body) - } catch (e) { - response = err.body - } - } else { - stats.assertions += 1 - assert.ifError(err, `should not error: ${cmd.method}`, action) - response = body - } - } - - /** - * Runs an actual test - * @param {string} the name of the test - * @param {object} the actions to perform - * @returns {Promise} - */ - async function exec (name, actions, stats, junit) { - // tap.comment(name) - for (const action of actions) { - if (action.skip) { - if (shouldSkip(esVersion, action.skip)) { - junit.skip(fillStashedValues(action.skip)) - logSkip(fillStashedValues(action.skip)) - break - } - } - - if (action.do) { - await doAction(fillStashedValues(action.do), stats) - } - - if (action.set) { - const key = Object.keys(action.set)[0] - set(fillStashedValues(key), action.set[key]) - } - - if (action.transform_and_set) { - const key = Object.keys(action.transform_and_set)[0] - transform_and_set(key, action.transform_and_set[key]) - } - - if (action.match) { - stats.assertions += 1 - const key = Object.keys(action.match)[0] - match( - // in some cases, the yaml refers to the body with an empty string - key === '$body' || key === '' - ? response - : delve(response, fillStashedValues(key)), - key === '$body' - ? action.match[key] - : fillStashedValues(action.match)[key], - action.match - ) - } - - if (action.lt) { - stats.assertions += 1 - const key = Object.keys(action.lt)[0] - lt( - delve(response, fillStashedValues(key)), - fillStashedValues(action.lt)[key] - ) - } - - if (action.gt) { - stats.assertions += 1 - const key = Object.keys(action.gt)[0] - gt( - delve(response, fillStashedValues(key)), - fillStashedValues(action.gt)[key] - ) - } - - if (action.lte) { - stats.assertions += 1 - const key = Object.keys(action.lte)[0] - lte( - delve(response, fillStashedValues(key)), - fillStashedValues(action.lte)[key] - ) - } - - if (action.gte) { - stats.assertions += 1 - const key = Object.keys(action.gte)[0] - gte( - delve(response, fillStashedValues(key)), - fillStashedValues(action.gte)[key] - ) - } - - if (action.length) { - stats.assertions += 1 - const key = Object.keys(action.length)[0] - length( - key === '$body' || key === '' - ? 
response - : delve(response, fillStashedValues(key)), - key === '$body' - ? action.length[key] - : fillStashedValues(action.length)[key] - ) - } - - if (action.is_true) { - stats.assertions += 1 - const isTrue = fillStashedValues(action.is_true) - is_true( - delve(response, isTrue), - isTrue - ) - } - - if (action.is_false) { - stats.assertions += 1 - const isFalse = fillStashedValues(action.is_false) - is_false( - delve(response, isFalse), - isFalse - ) - } - } - } - - return { run } -} - -/** - * Asserts that the given value is truthy - * @param {any} the value to check - * @param {string} an optional message - * @returns {TestRunner} - */ -function is_true (val, msg) { - assert.ok(val, `expect truthy value: ${msg} - value: ${JSON.stringify(val)}`) -} - -/** - * Asserts that the given value is falsey - * @param {any} the value to check - * @param {string} an optional message - * @returns {TestRunner} - */ -function is_false (val, msg) { - assert.ok(!val, `expect falsey value: ${msg} - value: ${JSON.stringify(val)}`) -} - -/** - * Asserts that two values are the same - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} - */ -function match (val1, val2, action) { - // both values are objects - if (typeof val1 === 'object' && typeof val2 === 'object') { - assert.ok(deepEqual(val1, val2), action) - // the first value is the body as string and the second a pattern string - } else if ( - typeof val1 === 'string' && typeof val2 === 'string' && - val2.startsWith('/') && (val2.endsWith('/\n') || val2.endsWith('/')) - ) { - const regStr = val2 - // match all comments within a "regexp" match arg - .replace(/([\S\s]?)#[^\n]*\n/g, (match, prevChar) => { - return prevChar === '\\' ? match : `${prevChar}\n` - }) - // remove all whitespace from the expression, all meaningful - // whitespace is represented with \s - .replace(/\s/g, '') - .slice(1, -1) - // 'm' adds the support for multiline regex - assert.ok(new RegExp(regStr, 'm').test(val1), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) - // tap.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, action: ${JSON.stringify(action)}`) - // everything else - } else { - assert.strictEqual(val1, val2, `should be equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) - } -} - -/** - * Asserts that the first value is less than the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} - */ -function lt (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 < val2) -} - -/** - * Asserts that the first value is greater than the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} - */ -function gt (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 > val2) -} - -/** - * Asserts that the first value is less than or equal the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} - */ -function lte (val1, val2) { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 <= val2) -} - -/** - * Asserts that the first value is greater than or equal the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @returns {TestRunner} -*/ -function gte (val1, val2) { - 
;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 >= val2) -} - -/** - * Asserts that the given value has the specified length - * @param {string|object|array} the object to check - * @param {number} the expected length - * @returns {TestRunner} - */ -function length (val, len) { - if (typeof val === 'string' || Array.isArray(val)) { - assert.strictEqual(val.length, len) - } else if (typeof val === 'object' && val !== null) { - assert.strictEqual(Object.keys(val).length, len) - } else { - assert.fail(`length: the given value is invalid: ${val}`) - } -} - -/** - * Gets a `do` action object and returns a structured object, - * where the action is the key and the parameter is the value. - * Eg: - * { - * 'indices.create': { - * 'index': 'test' - * }, - * 'warnings': [ - * '[index] is deprecated' - * ] - * } - * becomes - * { - * method: 'indices.create', - * params: { - * index: 'test' - * }, - * warnings: [ - * '[index] is deprecated' - * ] - * } - * @param {object} - * @returns {object} - */ -function parseDo (action) { - return Object.keys(action).reduce((acc, val) => { - switch (val) { - case 'catch': - acc.catch = action.catch - break - case 'warnings': - acc.warnings = action.warnings - break - case 'node_selector': - acc.node_selector = action.node_selector - break - default: - // converts underscore to camelCase - // eg: put_mapping => putMapping - acc.method = val.replace(/_([a-z])/g, g => g[1].toUpperCase()) - acc.params = camelify(action[val]) - } - return acc - }, {}) - - function camelify (obj) { - const newObj = {} - - // TODO: add camelCase support for this fields - const doNotCamelify = ['copy_settings'] - - for (const key in obj) { - const val = obj[key] - var newKey = key - if (!~doNotCamelify.indexOf(key)) { - // if the key starts with `_` we should not camelify the first occurence - // eg: _source_include => _sourceInclude - newKey = key[0] === '_' - ? 
'_' + key.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : key.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - if ( - val !== null && - typeof val === 'object' && - !Array.isArray(val) && - key !== 'body' - ) { - newObj[newKey] = camelify(val) - } else { - newObj[newKey] = val - } - } - - return newObj - } -} - -function parseDoError (err, spec) { - const httpErrors = { - bad_request: 400, - unauthorized: 401, - forbidden: 403, - missing: 404, - request_timeout: 408, - conflict: 409, - unavailable: 503 - } - - if (httpErrors[spec]) { - return err.statusCode === httpErrors[spec] - } - - if (spec === 'request') { - return err.statusCode >= 400 && err.statusCode < 600 - } - - if (spec.startsWith('/') && spec.endsWith('/')) { - return new RegExp(spec.slice(1, -1), 'g').test(JSON.stringify(err.body)) - } - - if (spec === 'param') { - return err instanceof ConfigurationError - } - - return false -} - -function getSkip (arr) { - if (!Array.isArray(arr)) return null - for (var i = 0; i < arr.length; i++) { - if (arr[i].skip) return arr[i].skip - } - return null -} - -// Gets two *maybe* numbers and returns two valida numbers -// it throws if one or both are not a valid number -// the returned value is an array with the new values -function getNumbers (val1, val2) { - const val1Numeric = Number(val1) - if (isNaN(val1Numeric)) { - throw new TypeError(`val1 is not a valid number: ${val1}`) - } - const val2Numeric = Number(val2) - if (isNaN(val2Numeric)) { - throw new TypeError(`val2 is not a valid number: ${val2}`) - } - return [val1Numeric, val2Numeric] -} - -function getRandomInt (min, max) { - return Math.floor(Math.random() * (max - min)) + min -} - -/** - * Logs a skip - * @param {object} the actions - * @returns {TestRunner} - */ -function logSkip (action) { - if (action.reason && action.version) { - console.log(`Skip: ${action.reason} (${action.version})`) - } else if (action.features) { - console.log(`Skip: ${JSON.stringify(action.features)})`) - } else { - console.log('Skipped') - } -} - -/** - * Decides if a test should be skipped - * @param {object} the actions - * @returns {boolean} - */ -function shouldSkip (esVersion, action) { - var shouldSkip = false - // skip based on the version - if (action.version) { - if (action.version.trim() === 'all') return true - const versions = action.version.split(',').filter(Boolean) - for (const version of versions) { - const [min, max] = version.split('-').map(v => v.trim()) - // if both `min` and `max` are specified - if (min && max) { - shouldSkip = semver.satisfies(esVersion, action.version) - // if only `min` is specified - } else if (min) { - shouldSkip = semver.gte(esVersion, min) - // if only `max` is specified - } else if (max) { - shouldSkip = semver.lte(esVersion, max) - // something went wrong! 
-      } else {
-        throw new Error(`skip: Bad version range: ${action.version}`)
-      }
-    }
-  }
-
-  if (shouldSkip) return true
-
-  if (action.features) {
-    if (!Array.isArray(action.features)) action.features = [action.features]
-    // returns true if one of the features is not present in the supportedFeatures
-    shouldSkip = !!action.features.filter(f => !~supportedFeatures.indexOf(f)).length
-  }
-
-  if (shouldSkip) return true
-
-  return false
-}
-
-/**
- * Updates the array syntax of keys and values
- * eg: 'hits.hits.1.stuff' to 'hits.hits[1].stuff'
- * @param {object} the action to update
- * @returns {obj} the updated action
- */
-// function updateArraySyntax (obj) {
-//   const newObj = {}
-
-//   for (const key in obj) {
-//     const newKey = key.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`)
-//     const val = obj[key]
-
-//     if (typeof val === 'string') {
-//       newObj[newKey] = val.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`)
-//     } else if (val !== null && typeof val === 'object') {
-//       newObj[newKey] = updateArraySyntax(val)
-//     } else {
-//       newObj[newKey] = val
-//     }
-//   }
-
-//   return newObj
-// }
-
-module.exports = build
diff --git a/test/mock/index.js b/test/mock/index.js
new file mode 100644
index 000000000..6d6452995
--- /dev/null
+++ b/test/mock/index.js
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+'use strict'
+
+const { test } = require('tap')
+const { Client, errors } = require('../../')
+const Mock = require('@elastic/elasticsearch-mock')
+
+test('Mock should work', async t => {
+  t.plan(1)
+
+  const mock = new Mock()
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: mock.getConnection()
+  })
+
+  mock.add({
+    method: 'GET',
+    path: '/_cat/indices'
+  }, () => {
+    return { status: 'ok' }
+  })
+
+  const response = await client.cat.indices()
+  t.same(response.body, { status: 'ok' })
+})
+
+test('Return an error', async t => {
+  t.plan(1)
+
+  const mock = new Mock()
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: mock.getConnection()
+  })
+
+  mock.add({
+    method: 'GET',
+    path: '/_cat/indices'
+  }, () => {
+    return new errors.ResponseError({
+      body: { errors: {}, status: 500 },
+      statusCode: 500
+    })
+  })
+
+  try {
+    await client.cat.indices()
+    t.fail('Should throw')
+  } catch (err) {
+    t.ok(err instanceof errors.ResponseError)
+  }
+})
diff --git a/test/mock/package.json b/test/mock/package.json
new file mode 100644
index 000000000..0017d01cd
--- /dev/null
+++ b/test/mock/package.json
@@ -0,0 +1,18 @@
+{
+  "name": "mock",
+  "version": "1.0.0",
+  "description": "",
+  "main": "index.js",
+  "scripts": {
+    "test": "standard && tap index.js"
+  },
+  "keywords": [],
+  "author": "",
+  "license": "ISC",
+  "dependencies": {
+    "@elastic/elasticsearch": "file:../..",
+    "@elastic/elasticsearch-mock": "^0.3.1",
+    "standard": "^16.0.3",
+    "tap": "^15.0.9"
+  }
+}
diff --git a/test/types/api-response-body.test-d.ts b/test/types/api-response-body.test-d.ts
deleted file mode 100644
index cba2b0777..000000000
--- a/test/types/api-response-body.test-d.ts
+++ /dev/null
@@ -1,269 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
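// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the new test/mock/index.js above
// always returns a fixed payload. Per the @elastic/elasticsearch-mock docs the
// resolver also receives the intercepted request, so a mock can respond
// dynamically. The `params` shape (method/path/querystring/body) is an
// assumption here — verify it against the pinned ^0.3.1 version:

'use strict'
const Mock = require('@elastic/elasticsearch-mock')
const { Client } = require('@elastic/elasticsearch')

const mock = new Mock()
const client = new Client({
  node: 'http://localhost:9200/', // never contacted: the mock connection intercepts every request
  Connection: mock.getConnection()
})

// echo the request body back, so a test can assert on what the client sent
mock.add({ method: 'POST', path: '/test/_search' }, params => ({
  hits: { total: { value: 1 }, hits: [{ _source: params.body }] }
}))

client.search({ index: 'test', body: { query: { match_all: {} } } })
  .then(r => console.log(r.body.hits.hits[0]._source))
  .catch(console.error)
// ---------------------------------------------------------------------------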
-// See the LICENSE file in the project root for more information - -import { expectType, expectError } from 'tsd' -import { Readable as ReadableStream } from 'stream'; -import { TransportRequestCallback } from '../../lib/Transport' -import { Client, ApiError } from '../../' - -const client = new Client({ - node: 'http://localhost:9200/' -}) - -interface SearchBody { - query: { - match: { foo: string } - } -} - -interface ShardsResponse { - total: number; - successful: number; - failed: number; - skipped: number; -} - -interface Explanation { - value: number; - description: string; - details: Explanation[]; -} - -interface SearchResponse { - took: number; - timed_out: boolean; - _scroll_id?: string; - _shards: ShardsResponse; - hits: { - total: number; - max_score: number; - hits: Array<{ - _index: string; - _type: string; - _id: string; - _score: number; - _source: T; - _version?: number; - _explanation?: Explanation; - fields?: any; - highlight?: any; - inner_hits?: any; - matched_queries?: string[]; - sort?: string[]; - }>; - }; - aggregations?: any; -} - -interface Source { - foo: string -} - -// body that does not respect the RequestBody constraint -expectError( - client.search({ - index: 'hello', - body: 42 - }).then(console.log) -) - -// No generics (promise style) -{ - const response = await client.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(response.body) - expectType(response.meta.context) -} - -// Define only the response body (promise style) -{ - const response = await client.search>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(response.body) - expectType(response.meta.context) -} - -// Define response body and request body (promise style) -{ - const response = await client.search, SearchBody>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(response.body) - expectType(response.meta.context) -} - -// Define response body, request body and the context (promise style) -{ - const response = await client.search, SearchBody, string>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(response.body) - expectType(response.meta.context) -} - -// Send request body as string (promise style) -{ - const response = await client.search({ - index: 'test', - body: 'hello world' - }) - - expectType>(response.body) - expectType(response.meta.context) -} - -// Send request body as buffer (promise style) -{ - const response = await client.search({ - index: 'test', - body: Buffer.from('hello world') - }) - - expectType>(response.body) - expectType(response.meta.context) -} - -// Send request body as readable stream (promise style) -{ - const response = await client.search({ - index: 'test', - body: new ReadableStream() - }) - - expectType>(response.body) - expectType(response.meta.context) -} - -// No generics (callback style) -{ - const result = client.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, (err, response) => { - expectType(err) - expectType>(response.body) - expectType(response.meta.context) - }) - expectType(result) -} - -// Define only the response body (callback style) -{ - const result = client.search>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, (err, response) => { - expectType(err) - expectType>(response.body) - expectType(response.meta.context) - }) - expectType(result) -} - -// Define response body and request body 
(callback style) -{ - const result = client.search, SearchBody>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, (err, response) => { - expectType(err) - expectType>(response.body) - expectType(response.meta.context) - }) - expectType(result) -} - -// Define response body, request body and the context (callback style) -{ - const result = client.search, SearchBody, string>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, (err, response) => { - expectType(err) - expectType>(response.body) - expectType(response.meta.context) - }) - expectType(result) -} - -// Send request body as string (callback style) -{ - const result = client.search({ - index: 'test', - body: 'hello world' - }, (err, response) => { - expectType(err) - expectType>(response.body) - expectType(response.meta.context) - }) - expectType(result) -} - -// Send request body as buffer (callback style) -{ - const result = client.search({ - index: 'test', - body: Buffer.from('hello world') - }, (err, response) => { - expectType(err) - expectType>(response.body) - expectType(response.meta.context) - }) - expectType(result) -} - -// Send request body as readable stream (callback style) -{ - const result = client.search({ - index: 'test', - body: new ReadableStream() - }, (err, response) => { - expectType(err) - expectType>(response.body) - expectType(response.meta.context) - }) - expectType(result) -} \ No newline at end of file diff --git a/test/types/api-response.test-d.ts b/test/types/api-response.test-d.ts deleted file mode 100644 index 06f22f318..000000000 --- a/test/types/api-response.test-d.ts +++ /dev/null @@ -1,65 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -import { expectType } from 'tsd' -import { TransportRequestCallback } from '../../lib/Transport' -import { Client, ApiError } from '../../' - -const client = new Client({ - node: 'http://localhost:9200/' -}) - -// No generics (promise style) -{ - const response = await client.cat.count({ index: 'test' }) - - expectType>(response.body) - expectType(response.meta.context) -} - -// Define only the response body (promise style) -{ - const response = await client.cat.count({ index: 'test' }) - - expectType(response.body) - expectType(response.meta.context) -} - -// Define response body and the context (promise style) -{ - const response = await client.cat.count({ index: 'test' }) - - expectType(response.body) - expectType(response.meta.context) -} - -// No generics (callback style) -{ - const result = client.cat.count({ index: 'test' }, (err, response) => { - expectType(err) - expectType>(response.body) - expectType(response.meta.context) - }) - expectType(result) -} - -// Define only the response body (callback style) -{ - const result = client.cat.count({ index: 'test' }, (err, response) => { - expectType(err) - expectType(response.body) - expectType(response.meta.context) - }) - expectType(result) -} - -// Define response body and the context (callback style) -{ - const result = client.cat.count({ index: 'test' }, (err, response) => { - expectType(err) - expectType(response.body) - expectType(response.meta.context) - }) - expectType(result) -} diff --git a/test/types/client-options.test-d.ts b/test/types/client-options.test-d.ts deleted file mode 100644 index d19a6a707..000000000 --- a/test/types/client-options.test-d.ts +++ /dev/null @@ -1,618 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -import { URL } from 'url' -import { expectType, expectError } from 'tsd' -import { TransportGetConnectionOptions } from '../../lib/Transport' -import { - Client, - Serializer, - Connection, - ConnectionPool, - Transport, - errors -} from '../../' - -/** - * `node` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/' - }) -) - -expectType( - new Client({ - nodes: ['http://localhost:9200/', 'http://localhost:9200/'] - }) -) - -expectType( - new Client({ - node: { - url: new URL('http://localhost:9200/'), - id: 'my-node' - } - }) -) - -expectType( - new Client({ - nodes: [{ - url: new URL('http://localhost:9200/'), - id: 'my-node-1' - }, { - url: new URL('http://localhost:9201/'), - id: 'my-node-2' - }] - }) -) - -expectError( - new Client({ - node: 42 - }) -) - -expectError( - new Client({ - node: { - url: 'http://localhost:9200/', - id: 'my-node' - } - }) -) - -/** - * `maxRetries` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - maxRetries: 5 - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - maxRetries: 'five' - }) -) - -/** - * `requestTimeout` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - requestTimeout: 5 - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - requestTimeout: 'five' - }) -) - -/** - * `pingTimeout` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - pingTimeout: 5 - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - pingTimeout: 'five' - }) -) - -/** - * `sniffInterval` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - sniffInterval: 5 - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - sniffInterval: false - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - sniffInterval: 'five' - }) -) - -/** - * `sniffOnStart` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - sniffOnStart: true - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - sniffOnStart: 'no' - }) -) - -/** - * `sniffEndpoint` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - sniffEndpoint: '/custom' - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - sniffEndpoint: false - }) -) - -/** - * `sniffOnConnectionFault` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - sniffOnConnectionFault: true - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - sniffOnConnectionFault: 'yes' - }) -) - -/** - * `resurrectStrategy` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - resurrectStrategy: 'ping' - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - resurrectStrategy: 'optimistic' - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - resurrectStrategy: 'none' - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - resurrectStrategy: 'custom' - }) -) - -/** - * `suggestCompression` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - suggestCompression: true - }) -) - -expectError( - new Client({ - node: 
'http://localhost:9200/', - suggestCompression: 'no' - }) -) - -/** - * `compression` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - compression: 'gzip' - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - compression: 'deflate' - }) -) - -/** - * `headers` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - headers: { foo: 'bar' } - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - headers: 'foo=bar' - }) -) - -/** - * `opaqueIdPrefix` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - opaqueIdPrefix: 'foo-' - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - opaqueIdPrefix: 42 - }) -) - -/** - * `name` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - name: 'foo' - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - name: 42 - }) -) - -/** - * `auth` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - auth: { - username: 'username', - password: 'password' - } - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - auth: { - apiKey: 'abcd' - } - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - auth: { - apiKey: { - api_key: 'foo', - id: 'bar' - } - } - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - auth: 'password' - }) -) - -/** - * `cloud` option - */ -expectType( - new Client({ - cloud: { - id: 'localhost:9200' - } - }) -) - -expectError( - new Client({ - cloud: { - id: 42 - } - }) -) - -/** - * `agent` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - agent: { - keepAlive: true, - keepAliveMsecs: 42, - maxSockets: 42, - maxFreeSockets: 42 - } - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - agent: { - keepAlive: 'yes', - keepAliveMsecs: true, - maxSockets: 'all', - maxFreeSockets: null - } - }) -) - -/** - * `ssl` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - ssl: { - ca: 'cert', - rejectUnauthorized: true - } - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - ssl: { - ca: 42, - rejectUnauthorized: 'yes' - } - }) -) - -/** - * `generateRequestId` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - generateRequestId (params, options) { - return 'id' - } - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - generateRequestId: 'id' - }) -) - -/** - * `nodeSelector` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - nodeSelector (connections) { - return connections[0] - } - }) -) - -expectType( - new Client({ - node: 'http://localhost:9200/', - nodeSelector: 'round-robin' - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - nodeSelector (connections) { - return 'id' - } - }) -) - -/** - * `nodeFilter` option - */ -expectType( - new Client({ - node: 'http://localhost:9200/', - nodeFilter (connection) { - return true - } - }) -) - -expectError( - new Client({ - node: 'http://localhost:9200/', - nodeFilter (connection) { - return 'id' - } - }) -) - -/** - * `Serializer` option - */ -{ - class CustomSerializer extends Serializer { - deserialize (str: string) { - return super.deserialize(str) - } - } - - 
expectType( - new Client({ - node: 'http://localhost:9200/', - Serializer: CustomSerializer - }) - ) -} - -{ - class CustomSerializer { - deserialize (str: string) { - return JSON.parse(str) - } - } - - expectError( - new Client({ - node: 'http://localhost:9200/', - Serializer: CustomSerializer - }) - ) -} - -/** - * `Connection` option - */ -{ - class CustomConnection extends Connection { - close () { - return super.close() - } - } - - expectType( - new Client({ - node: 'http://localhost:9200/', - Connection: CustomConnection - }) - ) -} - -{ - class CustomConnection { - close () { - return Promise.resolve() - } - } - - expectError( - new Client({ - node: 'http://localhost:9200/', - Connection: CustomConnection - }) - ) -} - -/** - * `ConnectionPool` option - */ -{ - class CustomConnectionPool extends ConnectionPool { - empty () { - return super.empty() - } - } - - expectType( - new Client({ - node: 'http://localhost:9200/', - ConnectionPool: CustomConnectionPool - }) - ) -} - -{ - class CustomConnectionPool { - empty () { - return this - } - } - - expectError( - new Client({ - node: 'http://localhost:9200/', - ConnectionPool: CustomConnectionPool - }) - ) -} - -/** - * `Transport` option - */ -{ - class CustomTransport extends Transport { - getConnection (opts: TransportGetConnectionOptions) { - return super.getConnection(opts) - } - } - - expectType( - new Client({ - node: 'http://localhost:9200/', - Transport: CustomTransport - }) - ) -} - -{ - class CustomTransport { - getConnection (opts: TransportGetConnectionOptions) { - return null - } - } - - expectError( - new Client({ - node: 'http://localhost:9200/', - Transport: CustomTransport - }) - ) -} diff --git a/test/types/client.test-d.ts b/test/types/client.test-d.ts deleted file mode 100644 index 339120f1e..000000000 --- a/test/types/client.test-d.ts +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
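// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the client-options type tests being
// deleted above exercised the legacy 7.x constructor options. A plain-JS
// equivalent of the valid combinations they checked (values are illustrative):

const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'https://localhost:9200/',
  maxRetries: 5,
  requestTimeout: 30000,
  sniffOnStart: false,
  compression: 'gzip',
  auth: { username: 'elastic', password: 'changeme' },
  ssl: { rejectUnauthorized: true } // 7.x option name; later majors call this `tls`
})
// ---------------------------------------------------------------------------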
-// See the LICENSE file in the project root for more information - -import { expectType } from 'tsd' -import { Client, ApiError, ApiResponse, RequestEvent, ResurrectEvent } from '../../' -import { TransportRequestCallback, TransportRequestPromise } from '../..//lib/Transport'; - -const client = new Client({ - node: 'http://localhost:9200/' -}) - -client.on('request', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('response', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('sniff', (err, meta) => { - expectType(err) - expectType(meta) -}) - -client.on('resurrect', (err, meta) => { - expectType(err) - expectType(meta) -}) - -// Test all overloads - -// Callbacks style -{ - const result = client.info((err, result) => { - expectType(err) - expectType(result) - }) - expectType(result) - expectType(result.abort()) -} - -{ - const result = client.info({ pretty: true }, (err, result) => { - expectType(err) - expectType(result) - }) - expectType(result) - expectType(result.abort()) -} - -{ - const result = client.info({ pretty: true }, { ignore: [404] }, (err, result) => { - expectType(err) - expectType(result) - }) - expectType(result) - expectType(result.abort()) -} - -// Promise style -{ - const promise = client.info() - expectType>(promise) - promise - .then(result => expectType(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -{ - const promise = client.info({ pretty: true }) - expectType>(promise) - promise - .then(result => expectType(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -{ - const promise = client.info({ pretty: true }, { ignore: [404] }) - expectType>(promise) - promise - .then(result => expectType(result)) - .catch((err: ApiError) => expectType(err)) - expectType(promise.abort()) -} - -// Promise style with async await -{ - const promise = client.info() - expectType>(promise) - expectType(promise.abort()) - try { - expectType(await promise) - } catch (err) { - expectType(err) - } -} - -{ - const promise = client.info({ pretty: true }) - expectType>(promise) - expectType(promise.abort()) - try { - expectType(await promise) - } catch (err) { - expectType(err) - } -} - -{ - const promise = client.info({ pretty: true }, { ignore: [404] }) - expectType>(promise) - expectType(promise.abort()) - try { - expectType(await promise) - } catch (err) { - expectType(err) - } -} \ No newline at end of file diff --git a/test/types/connection-pool.test-d.ts b/test/types/connection-pool.test-d.ts deleted file mode 100644 index 4354e66ca..000000000 --- a/test/types/connection-pool.test-d.ts +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
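// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): client.test-d.ts (deleted above)
// verified that every legacy 7.x API call supports both calling styles. The
// plain-JS runtime equivalent, for context:

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

// promise style; the returned promise also exposes abort() on 7.x
const promise = client.info()
promise
  .then(result => console.log(result.body))
  .catch(err => console.error(err))

// callback style, with per-request options as the second argument
client.info({ pretty: true }, { ignore: [404] }, (err, result) => {
  if (err) throw err
  console.log(result.statusCode)
})
// ---------------------------------------------------------------------------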
-// See the LICENSE file in the project root for more information - -import { expectType, expectAssignable } from 'tsd' -import { URL } from 'url' -import { - BaseConnectionPool, - ConnectionPool, - CloudConnectionPool, - Connection -} from '../../' - -{ - const pool = new BaseConnectionPool({ - Connection: Connection, - ssl: { ca: 'stirng' }, - emit: (event, ...args) => true, - agent: { keepAlive: true }, - auth: { username: 'username', password: 'password' } - }) - - expectType(pool) - expectType(pool.connections) - expectType(pool.size) - - expectType(pool.markAlive(new Connection())) - expectType(pool.markDead(new Connection())) - expectType(pool.getConnection({ - filter (node) { return true }, - selector (connections) { return connections[0] }, - requestId: 'id', - name: 'name', - now: Date.now() - })) - expectType(pool.addConnection({})) - expectType(pool.removeConnection(new Connection())) - expectType(pool.empty()) - expectType(pool.update([])) - expectType(pool.nodesToHost([], 'https')) - expectType<{ url: URL }>(pool.urlToHost('url')) -} - -{ - const pool = new ConnectionPool({ - Connection: Connection, - ssl: { ca: 'stirng' }, - emit: (event, ...args) => true, - agent: { keepAlive: true }, - auth: { username: 'username', password: 'password' }, - pingTimeout: 1000, - resurrectStrategy: 'ping', - sniffEnabled: true - }) - - expectAssignable(pool) - expectType(pool.connections) - expectType(pool.size) - expectType(pool.dead) - - expectAssignable(pool.markAlive(new Connection())) - expectAssignable(pool.markDead(new Connection())) - expectType(pool.getConnection({ - filter (node) { return true }, - selector (connections) { return connections[0] }, - requestId: 'id', - name: 'name', - now: Date.now() - })) - expectType(pool.addConnection({})) - expectAssignable(pool.removeConnection(new Connection())) - expectAssignable(pool.empty()) - expectAssignable(pool.update([])) - expectType(pool.nodesToHost([], 'https')) - expectType<{ url: URL }>(pool.urlToHost('url')) - expectType(pool.resurrect({ - now: Date.now(), - requestId: 'id', - name: 'name' - })) -} - -{ - const pool = new CloudConnectionPool({ - Connection: Connection, - ssl: { ca: 'stirng' }, - emit: (event, ...args) => true, - agent: { keepAlive: true }, - auth: { username: 'username', password: 'password' } - }) - - expectAssignable(pool) - expectType(pool.cloudConnection) - expectType(pool.getConnection()) -} diff --git a/test/types/connection.test-d.ts b/test/types/connection.test-d.ts deleted file mode 100644 index b6c5ddba3..000000000 --- a/test/types/connection.test-d.ts +++ /dev/null @@ -1,26 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -import { expectType } from 'tsd' -import { URL } from 'url' -import { Connection } from '../../' - -const conn = new Connection({ - url: new URL('http://localhost:9200/'), - ssl: { ca: 'string' }, - id: 'id', - headers: {}, - agent: { keepAlive: false }, - status: 'alive', - roles: { master: true }, - auth: { username: 'username', password: 'password' } -}) - -expectType(conn) -expectType(conn.url) -expectType(conn.id) -expectType>(conn.headers) -expectType(conn.deadCount) -expectType(conn.resurrectTimeout) -expectType(conn.status) diff --git a/test/types/errors.test-d.ts b/test/types/errors.test-d.ts deleted file mode 100644 index 8c08ae7fa..000000000 --- a/test/types/errors.test-d.ts +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -import { expectType } from 'tsd' -import { errors, ApiResponse, Connection } from '../../' - -const response = { - body: {}, - statusCode: 200, - headers: {}, - warnings: null, - meta: { - context: {}, - name: 'name', - request: { - params: { method: 'GET', path: '/' }, - options: {}, - id: 42 - }, - connection: new Connection(), - attempts: 0, - aborted: false, - } -} - -{ - const err = new errors.ElasticsearchClientError() - expectType(err.name) - expectType(err.message) -} - -{ - const err = new errors.TimeoutError('message', response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) -} - -{ - const err = new errors.ConnectionError('message', response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) -} - -{ - const err = new errors.NoLivingConnectionsError('message', response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) -} - -{ - const err = new errors.SerializationError('message', {}) - expectType(err.name) - expectType(err.message) - expectType(err.data) -} - -{ - const err = new errors.DeserializationError('message', 'data') - expectType(err.name) - expectType(err.message) - expectType(err.data) -} - -{ - const err = new errors.ConfigurationError('message') - expectType(err.name) - expectType(err.message) -} - -{ - const err = new errors.ResponseError(response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) - expectType>(err.body) - expectType(err.statusCode) - expectType>(err.headers) -} - -{ - const err = new errors.RequestAbortedError('message', response) - expectType(err.name) - expectType(err.message) - expectType(err.meta) -} \ No newline at end of file diff --git a/test/types/helpers.test-d.ts b/test/types/helpers.test-d.ts deleted file mode 100644 index 339f07e2f..000000000 --- a/test/types/helpers.test-d.ts +++ /dev/null @@ -1,461 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
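// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): errors.test-d.ts (deleted above)
// pinned the shape of each error class. At runtime, a failed request surfaces
// the same fields on a ResponseError:

const { Client, errors } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200/' })

client.indices.get({ index: 'does-not-exist' })
  .catch(err => {
    if (err instanceof errors.ResponseError) {
      // body, statusCode and headers mirror the failed HTTP response,
      // while meta carries the request/connection diagnostics
      console.log(err.statusCode, err.body, err.meta.attempts)
    }
  })
// ---------------------------------------------------------------------------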
-// See the LICENSE file in the project root for more information - -import { expectType, expectError, expectAssignable } from 'tsd' -import { Client } from '../../' -import { - BulkHelper, - BulkStats, - BulkHelperOptions, - ScrollSearchResponse, - OnDropDocument, - MsearchHelper -} from '../../lib/Helpers' -import { ApiResponse, ApiError } from '../../lib/Transport' - -const client = new Client({ - node: 'http://localhost:9200/' -}) - -/// .helpers.bulk - -const b = client.helpers.bulk>({ - datasource: [], - onDocument (doc) { - expectType>(doc) - return { index: { _index: 'test' } } - }, - flushBytes: 5000000, - flushInterval: 30000, - concurrency: 5, - retries: 3, - wait: 5000, - onDrop (doc) { - expectType>>(doc) - }, - refreshOnCompletion: true, - pipeline: 'my-pipeline' -}) - -expectType>(b) -expectType>(b.abort()) -b.then(stats => expectType(stats)) - -// body can't be provided -expectError( - client.helpers.bulk({ - datasource: [], - onDocument (doc) { - return { index: { _index: 'test' } } - }, - body: [] - }) -) - -// test onDocument actions -// index -{ - const options = { - datasource: [], - onDocument (doc: Record) { - return { index: { _index: 'test' } } - } - } - expectAssignable>>(options) -} -// create -{ - const options = { - datasource: [], - onDocument (doc: Record) { - return { create: { _index: 'test' } } - } - } - expectAssignable>>(options) -} -// update -{ - // without `:BulkHelperOptions` this test cannot pass - // but if we write these options inline inside - // a `.helper.bulk`, it works as expected - const options: BulkHelperOptions> = { - datasource: [], - onDocument (doc: Record) { - return [{ update: { _index: 'test' } }, doc] - } - } - expectAssignable>>(options) -} -// delete -{ - const options = { - datasource: [], - onDocument (doc: Record) { - return { delete: { _index: 'test' } } - } - } - expectAssignable>>(options) -} - -/// .helpers.scrollSearch - -// just search params -{ - async function test () { - const scrollSearch = client.helpers.scrollSearch({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const response of scrollSearch) { - expectAssignable(response) - } - } -} - -// search params and options -{ - async function test () { - const scrollSearch = client.helpers.scrollSearch({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, { ignore: [404] }) - - for await (const response of scrollSearch) { - expectAssignable(response) - expectType>(response.body) - expectType(response.documents) - expectType(response.meta.context) - } - } -} - -// with type defs -{ - interface ShardsResponse { - total: number; - successful: number; - failed: number; - skipped: number; - } - - interface Explanation { - value: number; - description: string; - details: Explanation[]; - } - - interface SearchResponse { - took: number; - timed_out: boolean; - _scroll_id?: string; - _shards: ShardsResponse; - hits: { - total: number; - max_score: number; - hits: Array<{ - _index: string; - _type: string; - _id: string; - _score: number; - _source: T; - _version?: number; - _explanation?: Explanation; - fields?: any; - highlight?: any; - inner_hits?: any; - matched_queries?: string[]; - sort?: string[]; - }>; - }; - aggregations?: any; - } - - interface Source { - foo: string - } - - async function test () { - const scrollSearch = client.helpers.scrollSearch>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const response of scrollSearch) { - 
expectAssignable(response) - expectType>(response.body) - expectType(response.documents) - expectType(response.meta.context) - } - } -} - -{ - interface SearchBody { - query: { - match: { foo: string } - } - } - - interface ShardsResponse { - total: number; - successful: number; - failed: number; - skipped: number; - } - - interface Explanation { - value: number; - description: string; - details: Explanation[]; - } - - interface SearchResponse { - took: number; - timed_out: boolean; - _scroll_id?: string; - _shards: ShardsResponse; - hits: { - total: number; - max_score: number; - hits: Array<{ - _index: string; - _type: string; - _id: string; - _score: number; - _source: T; - _version?: number; - _explanation?: Explanation; - fields?: any; - highlight?: any; - inner_hits?: any; - matched_queries?: string[]; - sort?: string[]; - }>; - }; - aggregations?: any; - } - - interface Source { - foo: string - } - - async function test () { - const scrollSearch = client.helpers.scrollSearch, SearchBody, string>({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const response of scrollSearch) { - expectAssignable(response) - expectType>(response.body) - expectType(response.documents) - expectType(response.meta.context) - } - } -} - -/// .helpers.scrollDocuments - -// just search params -{ - async function test () { - const scrollDocuments = client.helpers.scrollDocuments({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const document of scrollDocuments) { - expectType(document) - } - } -} - -// search params and options -{ - async function test () { - const scrollDocuments = client.helpers.scrollDocuments({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, { ignore: [404] }) - - for await (const document of scrollDocuments) { - expectType(document) - } - } -} - -// with type defs -{ - interface Source { - foo: string - } - - async function test () { - const scrollDocuments = client.helpers.scrollDocuments({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const document of scrollDocuments) { - expectType(document) - } - } -} - -{ - interface SearchBody { - query: { - match: { foo: string } - } - } - - interface Source { - foo: string - } - - async function test () { - const scrollDocuments = client.helpers.scrollDocuments({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - for await (const document of scrollDocuments) { - expectType(document) - } - } -} - -/// .helpers.search - -// just search params -{ - const p = client.helpers.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(p) - expectType(await p) -} - -// search params and options -{ - const p = client.helpers.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }, { ignore: [404] }) - - expectType>(p) - expectType(await p) -} - -// with type defs -{ - interface Source { - foo: string - } - - const p = client.helpers.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(p) - expectType(await p) -} - -{ - interface SearchBody { - query: { - match: { foo: string } - } - } - - interface Source { - foo: string - } - - const p = client.helpers.search({ - index: 'test', - body: { - query: { - match: { foo: 'bar' } - } - } - }) - - expectType>(p) - expectType(await p) -} - -/// .helpers.msearch - -const s = client.helpers.msearch({ - operations: 5, - 
-  concurrency: 5,
-  retries: 5,
-  wait: 5000
-})
-
-expectType<MsearchHelper>(s)
-expectType<void>(s.stop())
-expectType<void>(s.stop(new Error('kaboom')))
-
-expectType<Promise<ApiResponse<Record<string, any>, unknown>>>(s.search({ index: 'foo'}, { query: {} }))
-expectType<Promise<ApiResponse<Record<string, any>, string>>>(s.search<Record<string, any>, string>({ index: 'foo'}, { query: {} }))
-
-expectType<void>(s.search({ index: 'foo'}, { query: {} }, (err, result) => {
-  expectType<ApiError>(err)
-  expectType<ApiResponse>(result)
-}))
-expectType<void>(s.search<Record<string, any>, string>({ index: 'foo'}, { query: {} }, (err, result) => {
-  expectType<ApiError>(err)
-  expectType<ApiResponse<Record<string, any>, string>>(result)
-}))
diff --git a/test/types/serializer.test-d.ts b/test/types/serializer.test-d.ts
deleted file mode 100644
index 885e0d09f..000000000
--- a/test/types/serializer.test-d.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
-
-import { expectType } from 'tsd'
-import { Serializer } from '../../'
-
-const serializer = new Serializer()
-
-expectType<string>(serializer.serialize({}))
-expectType<any>(serializer.deserialize(''))
-expectType<string>(serializer.ndserialize([]))
-expectType<string>(serializer.qserialize({}))
diff --git a/test/types/transport.test-d.ts b/test/types/transport.test-d.ts
deleted file mode 100644
index ef77b2292..000000000
--- a/test/types/transport.test-d.ts
+++ /dev/null
@@ -1,156 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
-
-import { Readable as ReadableStream } from 'stream';
-import { expectType, expectAssignable, expectError } from 'tsd'
-import {
-  Transport,
-  Connection,
-  ConnectionPool,
-  Serializer
-} from '../..'
-import {
-  TransportRequestParams,
-  TransportRequestOptions,
-  TransportRequestCallback,
-  RequestEvent,
-  ApiError,
-  RequestBody,
-  RequestNDBody,
-  ApiResponse
-} from '../../lib/Transport'
-
-const params = {
-  method: 'POST',
-  path: '/search',
-  body: { foo: 'bar' },
-  querystring: { baz: 'faz' }
-}
-
-const options = {
-  ignore: [404],
-  requestTimeout: 5000,
-  maxRetries: 3,
-  asStream: false,
-  headers: {},
-  querystring: {},
-  id: 'id',
-  context: {},
-  warnings: ['warn'],
-  opaqueId: 'id'
-}
-
-const response = {
-  body: {},
-  statusCode: 200,
-  headers: {},
-  warnings: null,
-  meta: {
-    context: {},
-    name: 'name',
-    request: {
-      params,
-      options,
-      id: 'id'
-    },
-    connection: new Connection(),
-    attempts: 0,
-    aborted: false
-  }
-}
-
-expectAssignable<TransportRequestParams>(params)
-expectAssignable<TransportRequestParams>({ method: 'GET', path: '/' })
-expectAssignable<TransportRequestOptions>(options)
-expectAssignable<ApiResponse>(response)
-expectAssignable<RequestEvent>(response)
-
-// verify that RequestBody, RequestNDBody and ResponseBody works as expected
-interface TestBody { hello: string }
-expectAssignable<RequestBody>({ foo: 'bar' })
-expectAssignable<RequestBody<TestBody>>({ hello: 'world' })
-expectError<RequestBody<TestBody>>({ foo: 'bar' })
-expectAssignable<RequestBody>('string')
-expectAssignable<RequestBody<TestBody>>('string')
-expectAssignable<RequestBody>(Buffer.from('hello world'))
-expectAssignable<RequestBody>(new ReadableStream())
-
-expectAssignable<RequestNDBody>([{ foo: 'bar' }])
-expectAssignable<RequestNDBody<TestBody>[]>([{ hello: 'world' }])
-expectError<RequestNDBody>({ foo: 'bar' })
-expectError<RequestNDBody<TestBody>[]>([{ foo: 'bar' }])
-expectAssignable<RequestNDBody>(['string'])
-expectAssignable<RequestNDBody>(Buffer.from('hello world'))
-expectAssignable<RequestNDBody>(new ReadableStream())
-
-const transport = new Transport({
-  emit: (event, ...args) => true,
-  serializer: new Serializer(),
-  connectionPool: new ConnectionPool(),
-  maxRetries: 5,
-  requestTimeout: 1000,
-  suggestCompression: true,
-  compression: 'gzip',
-  sniffInterval: 1000,
-  sniffOnConnectionFault: true,
-  sniffEndpoint: '/sniff',
-  sniffOnStart: false
-})
-
-expectType<Transport>(transport)
-
-expectType<TransportRequestCallback>(transport.request(params, options, (err, result) => {}))
-
-// body as object
-transport.request(params, options, (err, result) => {
-  expectType<ApiError>(err)
-  expectType<ApiResponse>(result)
-})
-
-// body as string
-transport.request({
-  method: 'POST',
-  path: '/search',
-  body: 'hello world',
-  querystring: { baz: 'faz' }
-}, options, (err, result) => {
-  expectType<ApiError>(err)
-  expectType<ApiResponse>(result)
-})
-
-// body as Buffer
-transport.request({
-  method: 'POST',
-  path: '/search',
-  body: Buffer.from('hello world'),
-  querystring: { baz: 'faz' }
-}, options, (err, result) => {
-  expectType<ApiError>(err)
-  expectType<ApiResponse>(result)
-})
-
-// body as ReadableStream
-transport.request({
-  method: 'POST',
-  path: '/search',
-  body: new ReadableStream(),
-  querystring: { baz: 'faz' }
-}, options, (err, result) => {
-  expectType<ApiError>(err)
-  expectType<ApiResponse>(result)
-})
-
-const promise = transport.request(params, options)
-expectType<Promise<ApiResponse>>(promise)
-promise.then(result => expectType<ApiResponse>(result))
-expectType<ApiResponse>(await promise)
-
-// body that does not respect the RequestBody constraint
-expectError(
-  transport.request({
-    method: 'POST',
-    path: '/',
-    body: 42
-  })
-)
diff --git a/test/unit/api-async.js b/test/unit/api-async.js
deleted file mode 100644
index 1821836a2..000000000
--- a/test/unit/api-async.js
+++ /dev/null
@@ -1,80 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information - -'use strict' - -const { Client, errors } = require('../../index') -const { buildServer } = require('../utils') - -function runAsyncTest (test) { - test('async await (search)', t => { - t.plan(1) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, async ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - try { - const { body } = await client.search({ - index: 'test', - type: 'doc', - q: 'foo:bar' - }) - t.deepEqual(body, { hello: 'world' }) - } catch (err) { - t.fail(err) - } - server.stop() - }) - }) - - test('async await (index)', t => { - t.plan(1) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, async ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - try { - await client.index({ - index: 'test', - body: { foo: 'bar' } - }) - t.pass('ok') - } catch (err) { - t.fail(err) - } - server.stop() - }) - }) - - test('async await (ConfigurationError)', async t => { - t.plan(1) - - const client = new Client({ - node: 'http://localhost:9200/' - }) - - try { - await client.index({ body: { foo: 'bar' } }) - t.fail('Should throw') - } catch (err) { - t.ok(err instanceof errors.ConfigurationError) - } - }) -} - -module.exports = runAsyncTest diff --git a/test/unit/api.test.js b/test/unit/api.test.js deleted file mode 100644 index ad61a71e1..000000000 --- a/test/unit/api.test.js +++ /dev/null @@ -1,304 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { Client, errors } = require('../../index') -const { buildServer } = require('../utils') - -test('Basic (callback)', t => { - t.plan(2) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Basic (promises)', t => { - t.plan(1) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client - .search({ - index: 'test', - q: 'foo:bar' - }) - .then(({ body }) => { - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - .catch(t.fail) - }) -}) - -test('Error (callback)', t => { - t.plan(1) - - function handler (req, res) { - res.statusCode = 500 - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.ok(err) - server.stop() - }) - }) -}) - -test('Error (promises)', t => { - t.plan(1) - - function handler (req, res) { - res.statusCode = 500 - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client - .search({ - index: 'test', - q: 'foo:bar' - }) - .then(t.fail) - .catch(err => { - t.ok(err) - server.stop() - }) - }) -}) - -test('Abort method (callback)', t => { - t.plan(3) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - const request = client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - - t.type(request.abort, 'function') - }) -}) - -test('Abort method (promises)', t => { - t.plan(2) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - const request = client.search({ - index: 'test', - q: 'foo:bar' - }) - - request - .then(({ body }) => { - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - .catch(t.fail) - - t.type(request.abort, 'function') - }) -}) - -test('Basic (options and callback)', t => { - t.plan(2) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - 
requestTimeout: 10000 - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Basic (options and promises)', t => { - t.plan(1) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client - .search({ - index: 'test', - q: 'foo:bar' - }, { - requestTimeout: 10000 - }) - .then(({ body }) => { - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - .catch(t.fail) - }) -}) - -test('Pass unknown parameters as query parameters (and get a warning)', t => { - t.plan(4) - - function handler (req, res) { - t.strictEqual(req.url, '/test/_search?q=foo%3Abar&winter=is%20coming') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar', - winter: 'is coming' - }, (err, { body, warnings }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - t.deepEqual(warnings, ['Client - Unknown parameter: "winter", sending it as query parameter']) - server.stop() - }) - }) -}) - -test('If the API uses the same key for both url and query parameter, the url should win', t => { - t.plan(2) - - function handler (req, res) { - t.strictEqual(req.url, '/index/_bulk') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - // bulk has two `type` parameters - client.bulk({ - index: 'index', - body: [] - }, (err, { body, warnings }) => { - t.error(err) - server.stop() - }) - }) -}) - -test('ConfigurationError (callback)', t => { - t.plan(1) - - const client = new Client({ - node: 'http://localhost:9200/' - }) - - client.index({ - body: { foo: 'bar' } - }, (err, { body }) => { - t.ok(err instanceof errors.ConfigurationError) - }) -}) - -test('ConfigurationError (promises)', t => { - t.plan(1) - - const client = new Client({ - node: 'http://localhost:9200/' - }) - - client - .index({ body: { foo: 'bar' } }) - .then(t.fail) - .catch(err => { - t.ok(err instanceof errors.ConfigurationError) - }) -}) - -if (Number(process.version.split('.')[0].slice(1)) >= 8) { - require('./api-async')(test) -} diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts new file mode 100644 index 000000000..213f87da5 --- /dev/null +++ b/test/unit/api.test.ts @@ -0,0 +1,265 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import { test } from 'tap' +import { connection } from '../utils' +import { Client } from '../..' 
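+// These tests run against the mock connection from '../utils'
+// (connection.buildMockConnection), so no live Elasticsearch node is
+// required: each test asserts on the serialized request and a canned response.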
+import { Transport } from '@elastic/transport'
+import * as T from '../../lib/api/types'
+
+test('Api with top level body', async t => {
+  t.plan(2)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      // @ts-expect-error
+      t.same(JSON.parse(opts.body), { query: { match_all: {} } })
+      return {
+        statusCode: 200,
+        body: { took: 42 }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection
+  })
+
+  const response = await client.search({
+    index: 'test',
+    allow_no_indices: true,
+    query: { match_all: {} }
+  })
+
+  t.equal(response.took, 42)
+})
+
+test('Api with keyed body', async t => {
+  t.plan(2)
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      // @ts-expect-error
+      t.same(JSON.parse(opts.body), { foo: 'bar' })
+      return {
+        statusCode: 200,
+        body: { result: 'created' }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection
+  })
+
+  const response = await client.create({
+    index: 'test',
+    id: '1',
+    document: { foo: 'bar' }
+  })
+
+  t.equal(response.result, 'created')
+})
+
+test('With generic document', async t => {
+  t.plan(1)
+
+  interface Doc {
+    foo: string
+  }
+
+  const Connection = connection.buildMockConnection({
+    onRequest (_opts) {
+      return {
+        statusCode: 200,
+        body: {
+          took: 42,
+          hits: {
+            hits: [{
+              _source: { foo: 'bar' }
+            }]
+          },
+          aggregations: {
+            unique: {
+              buckets: [{ key: 'bar' }]
+            }
+          }
+        }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection
+  })
+
+  const response = await client.search<Doc>({
+    index: 'test',
+    allow_no_indices: true,
+    query: { match_all: {} },
+    aggregations: {
+      unique: {
+        terms: {
+          field: 'foo'
+        }
+      }
+    }
+  })
+
+  t.equal(response.hits.hits[0]._source?.foo, 'bar')
+})
+
+test('With generic document and aggregation', async t => {
+  t.plan(2)
+
+  interface Doc {
+    foo: string
+  }
+
+  interface Aggregations {
+    unique: T.AggregationsTermsAggregateBase<{ key: string }>
+  }
+
+  const Connection = connection.buildMockConnection({
+    onRequest (opts) {
+      return {
+        statusCode: 200,
+        body: {
+          took: 42,
+          hits: {
+            hits: [{
+              _source: { foo: 'bar' }
+            }]
+          },
+          aggregations: {
+            unique: {
+              buckets: [{ key: 'bar' }]
+            }
+          }
+        }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection
+  })
+
+  const response = await client.search<Doc, Aggregations>({
+    index: 'test',
+    allow_no_indices: true,
+    query: { match_all: {} },
+    aggregations: {
+      unique: {
+        terms: {
+          field: 'foo'
+        }
+      }
+    }
+  })
+
+  t.equal(response.hits.hits[0]._source?.foo, 'bar')
+  t.ok(Array.isArray(response.aggregations?.unique.buckets))
+})
+
+test('Api request metadata', t => {
+  t.test('name', async t => {
+    class TestTransport extends Transport {
+      // @ts-expect-error
+      async request(params, options) {
+        t.equal(params.meta.name, 'synonyms.put_synonym_rule')
+        return super.request(params, options)
+      }
+    }
+
+    const Connection = connection.buildMockConnection({
+      onRequest () {
+        return {
+          statusCode: 200,
+          body: { took: 42 }
+        }
+      }
+    })
+
+    const client = new Client({
+      node: 'http://localhost:9200/',
+      // @ts-expect-error
+      Transport: TestTransport,
+      Connection
+    })
+    // @ts-expect-error
+    await client.synonyms.putSynonymRule({ set_id: "foo", rule_id: "bar" })
+  })
+
+  t.test('pathParts', async t => {
+    class TestTransport extends Transport {
+      // @ts-expect-error
+      async request(params, options) {
+        t.strictSame(params.meta.pathParts, {
+
set_id: 'foo', + rule_id: 'bar' + }) + return super.request(params, options) + } + } + + const Connection = connection.buildMockConnection({ + onRequest () { + return { + statusCode: 200, + body: { took: 42 } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + // @ts-expect-error + Transport: TestTransport, + Connection + }) + // @ts-expect-error + await client.synonyms.putSynonymRule({ set_id: "foo", rule_id: "bar" }) + }) + + t.test('acceptedParams', async t => { + class TestTransport extends Transport { + // @ts-expect-error + async request(params, options) { + t.strictSame(params.meta.acceptedParams, [ + 'set_id', + 'rule_id', + 'synonyms', + 'refresh', + ]) + return super.request(params, options) + } + } + + const Connection = connection.buildMockConnection({ + onRequest () { + return { + statusCode: 200, + body: { took: 42 } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + // @ts-expect-error + Transport: TestTransport, + Connection + }) + // @ts-expect-error + await client.synonyms.putSynonymRule({ set_id: "foo", rule_id: "bar" }) + }) + + t.end() +}) diff --git a/test/unit/base-connection-pool.test.js b/test/unit/base-connection-pool.test.js deleted file mode 100644 index 74d4ff3b7..000000000 --- a/test/unit/base-connection-pool.test.js +++ /dev/null @@ -1,490 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const BaseConnectionPool = require('../../lib/pool/BaseConnectionPool') -const Connection = require('../../lib/Connection') - -test('API', t => { - t.test('addConnection', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - t.ok(pool.connections.find(c => c.id === href) instanceof Connection) - t.strictEqual(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE) - t.end() - }) - - t.test('addConnection should throw with two connections with the same id', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - try { - pool.addConnection(href) - t.fail('Should throw') - } catch (err) { - t.is(err.message, `Connection with id '${href}' is already present`) - } - t.end() - }) - - t.test('addConnection should handle not-friendly url parameters for user and password', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://us/"er:p@assword@localhost:9200/' - pool.addConnection(href) - const conn = pool.connections[0] - t.strictEqual(conn.url.username, 'us%22er') - t.strictEqual(conn.url.password, 'p%40assword') - t.match(conn.headers, { - authorization: 'Basic ' + Buffer.from('us"er:p@assword').toString('base64') - }) - t.end() - }) - - t.test('markDead', t => { - const pool = new BaseConnectionPool({ Connection, sniffEnabled: true }) - const href = 'http://localhost:9200/' - var connection = pool.addConnection(href) - t.same(pool.markDead(connection), pool) - connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.status, Connection.statuses.ALIVE) - t.end() - }) - - t.test('markAlive', t => { - const pool = new BaseConnectionPool({ Connection, sniffEnabled: true }) - const href = 
'http://localhost:9200/' - var connection = pool.addConnection(href) - t.same(pool.markAlive(connection), pool) - connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.status, Connection.statuses.ALIVE) - t.end() - }) - - t.test('getConnection should throw', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - try { - pool.getConnection() - t.fail('Should fail') - } catch (err) { - t.is(err.message, 'getConnection must be implemented') - } - t.end() - }) - - t.test('removeConnection', t => { - const pool = new BaseConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - var connection = pool.addConnection(href) - pool.removeConnection(connection) - t.strictEqual(pool.size, 0) - t.end() - }) - - t.test('empty', t => { - const pool = new BaseConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - pool.addConnection('http://localhost:9201/') - pool.empty(() => { - t.strictEqual(pool.size, 0) - t.end() - }) - }) - - t.test('urlToHost', t => { - const pool = new BaseConnectionPool({ Connection }) - const url = 'http://localhost:9200/' - t.deepEqual( - pool.urlToHost(url), - { url: new URL(url) } - ) - t.end() - }) - - t.test('nodesToHost', t => { - t.test('publish_address as ip address (IPv4)', t => { - const pool = new BaseConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: '127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: '127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '127.0.0.1:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '127.0.0.1:9201') - t.end() - }) - - t.test('publish_address as ip address (IPv6)', t => { - const pool = new BaseConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: '[::1]:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: '[::1]:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://[::1]:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://[::1]:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '[::1]:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '[::1]:9201') - t.end() - }) - - t.test('publish_address as host/ip (IPv4)', t => { - const pool = new BaseConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, 
- ingest: true, - ml: false - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') - t.end() - }) - - t.test('publish_address as host/ip (IPv6)', t => { - const pool = new BaseConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/[::1]:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/[::1]:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') - t.end() - }) - - t.test('Should use the configure protocol', t => { - const pool = new BaseConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.strictEqual(pool.nodesToHost(nodes, 'https:')[0].url.protocol, 'https:') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.protocol, 'http:') - t.end() - }) - - t.end() - }) - - t.test('update', t => { - t.test('Should not update existing connections', t => { - t.plan(2) - const pool = new BaseConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true - } - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true - } - }]) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: null - }]) - - t.ok(pool.connections.find(c => c.id === 'a1').roles !== null) - t.ok(pool.connections.find(c => c.id === 'a2').roles !== null) - }) - - t.test('Should not update existing connections (mark alive)', t => { - t.plan(5) - class CustomBaseConnectionPool extends BaseConnectionPool { - markAlive (connection) { - t.ok('called') - super.markAlive(connection) - } - } - const pool = new CustomBaseConnectionPool({ Connection }) - const conn1 = pool.addConnection({ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true - } - }) - - const conn2 = pool.addConnection({ - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true - } - }) - - pool.markDead(conn1) - pool.markDead(conn2) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: null - }]) - - t.ok(pool.connections.find(c => c.id === 'a1').roles !== null) - t.ok(pool.connections.find(c => c.id === 'a2').roles !== null) - }) - - t.test('Should not 
update existing connections (same url, different id)', t => {
-      t.plan(3)
-      class CustomBaseConnectionPool extends BaseConnectionPool {
-        markAlive (connection) {
-          t.ok('called')
-          super.markAlive(connection)
-        }
-      }
-      const pool = new CustomBaseConnectionPool({ Connection })
-      pool.addConnection([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'http://127.0.0.1:9200/',
-        roles: {
-          master: true,
-          data: true,
-          ingest: true
-        }
-      }])
-
-      pool.update([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: true
-      }])
-
-      // roles will never be updated, we only use it to do
-      // a dummy check to see if the connection has been updated
-      t.deepEqual(pool.connections.find(c => c.id === 'a1').roles, {
-        master: true,
-        data: true,
-        ingest: true,
-        ml: false
-      })
-      t.strictEqual(pool.connections.find(c => c.id === 'http://127.0.0.1:9200/'), undefined)
-    })
-
-    t.test('Add a new connection', t => {
-      t.plan(2)
-      const pool = new BaseConnectionPool({ Connection })
-      pool.addConnection({
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: {
-          master: true,
-          data: true,
-          ingest: true
-        }
-      })
-
-      pool.update([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: null
-      }, {
-        url: new URL('http://127.0.0.1:9201/'),
-        id: 'a2',
-        roles: null
-      }])
-
-      t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
-      t.ok(pool.connections.find(c => c.id === 'a2'))
-    })
-
-    t.test('Remove old connections', t => {
-      t.plan(3)
-      const pool = new BaseConnectionPool({ Connection })
-      pool.addConnection({
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a1',
-        roles: null
-      })
-
-      pool.update([{
-        url: new URL('http://127.0.0.1:9200/'),
-        id: 'a2',
-        roles: null
-      }, {
-        url: new URL('http://127.0.0.1:9201/'),
-        id: 'a3',
-        roles: null
-      }])
-
-      t.false(pool.connections.find(c => c.id === 'a1'))
-      t.true(pool.connections.find(c => c.id === 'a2'))
-      t.true(pool.connections.find(c => c.id === 'a3'))
-    })
-
-    t.end()
-  })
-
-  t.test('CreateConnection', t => {
-    t.plan(1)
-    const pool = new BaseConnectionPool({ Connection })
-    const conn = pool.createConnection('http://localhost:9200/')
-    pool.connections.push(conn)
-    try {
-      pool.createConnection('http://localhost:9200/')
-      t.fail('Should throw')
-    } catch (err) {
-      t.is(err.message, 'Connection with id \'http://localhost:9200/\' is already present')
-    }
-  })
-
-  t.end()
-})
diff --git a/test/unit/child.test.js b/test/unit/child.test.js
deleted file mode 100644
index e6a2b91ea..000000000
--- a/test/unit/child.test.js
+++ /dev/null
@@ -1,267 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { Client, errors } = require('../../index') -const { - buildServer, - connection: { MockConnection } -} = require('../utils') - -test('Should create a child client (headers check)', t => { - t.plan(4) - - var count = 0 - function handler (req, res) { - if (count++ === 0) { - t.match(req.headers, { 'x-foo': 'bar' }) - } else { - t.match(req.headers, { 'x-baz': 'faz' }) - } - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - headers: { 'x-foo': 'bar' } - }) - const child = client.child({ - headers: { 'x-baz': 'faz' } - }) - - client.info((err, res) => { - t.error(err) - child.info((err, res) => { - t.error(err) - server.stop() - }) - }) - }) -}) - -test('Should create a child client (timeout check)', t => { - t.plan(2) - - function handler (req, res) { - setTimeout(() => { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }, 50) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ node: `http://localhost:${port}` }) - const child = client.child({ requestTimeout: 25, maxRetries: 0 }) - - client.info((err, res) => { - t.error(err) - child.info((err, res) => { - t.true(err instanceof errors.TimeoutError) - server.stop() - }) - }) - }) -}) - -test('Client extensions', t => { - t.test('One level', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('utility.index', () => { - return () => t.ok('called') - }) - - const child = client.child() - child.utility.index() - }) - - t.test('Two levels', t => { - t.plan(2) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('utility.index', () => { - return () => t.ok('called') - }) - - const child = client.child() - child.extend('utility.search', () => { - return () => t.ok('called') - }) - - const grandchild = child.child() - grandchild.utility.index() - grandchild.utility.search() - }) - - t.test('The child should not extend the parent', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - const child = client.child() - - child.extend('utility.index', () => { - return () => t.fail('Should not be called') - }) - - try { - client.utility.index() - } catch (err) { - t.ok(err) - } - }) - - t.end() -}) - -test('Should share the event emitter', t => { - t.test('One level', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const child = client.child() - - client.on('response', (err, meta) => { - t.error(err) - }) - - child.info((err, res) => { - t.error(err) - }) - }) - - t.test('Two levels', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const child = client.child() - const grandchild = child.child() - - client.on('response', (err, meta) => { - t.error(err) - }) - - grandchild.info((err, res) => { - t.error(err) - }) - }) - - t.test('Child listener - one level', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const child = client.child() - - child.on('response', (err, meta) => { - t.error(err) - }) - - 
child.info((err, res) => { - t.error(err) - }) - }) - - t.test('Child listener - two levels', t => { - t.plan(2) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const child = client.child() - const grandchild = child.child() - - child.on('response', (err, meta) => { - t.error(err) - }) - - grandchild.info((err, res) => { - t.error(err) - }) - }) - - t.end() -}) - -test('Should create a child client (generateRequestId check)', t => { - t.plan(6) - - function generateRequestId1 () { - var id = 0 - return () => `trace-1-${id++}` - } - - function generateRequestId2 () { - var id = 0 - return () => `trace-2-${id++}` - } - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - generateRequestId: generateRequestId1() - }) - const child = client.child({ - Connection: MockConnection, - generateRequestId: generateRequestId2() - }) - - var count = 0 - client.on('request', (err, { meta }) => { - t.error(err) - t.strictEqual( - meta.request.id, - count++ === 0 ? 'trace-1-0' : 'trace-2-0' - ) - }) - - client.info(err => { - t.error(err) - child.info(t.error) - }) -}) - -test('Should create a child client (name check)', t => { - t.plan(8) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - name: 'parent' - }) - const child = client.child({ - Connection: MockConnection, - name: 'child' - }) - - t.strictEqual(client.name, 'parent') - t.strictEqual(child.name, 'child') - - var count = 0 - client.on('request', (err, { meta }) => { - t.error(err) - t.strictEqual( - meta.name, - count++ === 0 ? 'parent' : 'child' - ) - }) - - client.info(err => { - t.error(err) - child.info(t.error) - }) -}) diff --git a/test/unit/client.test.js b/test/unit/client.test.js deleted file mode 100644 index 885a824d5..000000000 --- a/test/unit/client.test.js +++ /dev/null @@ -1,1069 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const { Client, ConnectionPool, Transport } = require('../../index') -const { CloudConnectionPool } = require('../../lib/pool') -const { buildServer } = require('../utils') - -test('Configure host', t => { - t.test('Single string', t => { - const client = new Client({ - node: 'http://localhost:9200/' - }) - const pool = client.connectionPool - t.match(pool.connections.find(c => c.id === 'http://localhost:9200/'), { - url: new URL('http://localhost:9200/'), - id: 'http://localhost:9200/', - ssl: null, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - t.end() - }) - - t.test('Array of strings', t => { - const client = new Client({ - nodes: ['http://localhost:9200/', 'http://localhost:9201/'] - }) - const pool = client.connectionPool - t.match(pool.connections.find(c => c.id === 'http://localhost:9200/'), { - url: new URL('http://localhost:9200/'), - id: 'http://localhost:9200/', - ssl: null, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - t.match(pool.connections.find(c => c.id === 'http://localhost:9201/'), { - url: new URL('http://localhost:9201/'), - id: 'http://localhost:9201/', - ssl: null, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - - t.end() - }) - - t.test('Single object', t => { - const client = new Client({ - node: { - url: new URL('http://localhost:9200/'), - id: 'node', - roles: { - master: true, - data: false, - ingest: false - }, - ssl: 'ssl' - } - }) - const pool = client.connectionPool - t.match(pool.connections.find(c => c.id === 'node'), { - url: new URL('http://localhost:9200/'), - id: 'node', - ssl: 'ssl', - deadCount: 0, - resurrectTimeout: 0 - }) - - t.deepEqual(pool.connections.find(c => c.id === 'node').roles, { - master: true, - data: false, - ingest: false, - ml: false - }) - - t.end() - }) - - t.test('Array of objects', t => { - const client = new Client({ - nodes: [{ - url: new URL('http://localhost:9200/'), - id: 'node1', - roles: { - master: true, - data: false, - ingest: false - }, - ssl: 'ssl' - }, { - url: new URL('http://localhost:9200/'), - id: 'node2', - roles: { - master: false, - data: true, - ingest: false - }, - ssl: 'ssl' - }] - }) - const pool = client.connectionPool - t.match(pool.connections.find(c => c.id === 'node1'), { - url: new URL('http://localhost:9200/'), - id: 'node1', - ssl: 'ssl', - deadCount: 0, - resurrectTimeout: 0 - }) - - t.deepEqual(pool.connections.find(c => c.id === 'node1').roles, { - master: true, - data: false, - ingest: false, - ml: false - }) - - t.match(pool.connections.find(c => c.id === 'node2'), { - url: new URL('http://localhost:9200/'), - id: 'node2', - ssl: 'ssl', - deadCount: 0, - resurrectTimeout: 0 - }) - - t.deepEqual(pool.connections.find(c => c.id === 'node2').roles, { - master: false, - data: true, - ingest: false, - ml: false - }) - - t.end() - }) - - t.test('Custom headers', t => { - const client = new Client({ - node: { - url: new URL('http://localhost:9200/'), - headers: { 'x-foo': 'bar' }, - id: 'node' - } - }) - const pool = client.connectionPool - t.match(pool.connections.find(c 
=> c.id === 'node'), { - url: new URL('http://localhost:9200/'), - headers: { 'x-foo': 'bar' } - }) - t.end() - }) - - t.test('Missing node conf', t => { - try { - new Client() // eslint-disable-line - t.fail('Should fail') - } catch (err) { - t.ok(err) - } - t.end() - }) - - t.end() -}) - -test('Authentication', t => { - t.test('Basic', t => { - t.test('Node with basic auth data in the url', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://foo:bar@localhost:${port}` - }) - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Node with basic auth data in the url (array of nodes)', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - nodes: [`http://foo:bar@localhost:${port}`] - }) - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Node with basic auth data in the options', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - username: 'foo', - password: 'bar' - } - }) - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Custom basic authentication per request', t => { - t.plan(6) - - var first = true - function handler (req, res) { - t.match(req.headers, { - authorization: first ? 'hello' : 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://foo:bar@localhost:${port}` - }) - - client.info({}, { - headers: { - authorization: 'hello' - } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - first = false - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.test('Override default basic authentication per request', t => { - t.plan(6) - - var first = true - function handler (req, res) { - t.match(req.headers, { - authorization: first ? 
'hello' : 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - username: 'foo', - password: 'bar' - } - }) - - client.info({}, { - headers: { - authorization: 'hello' - } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - first = false - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.end() - }) - - t.test('ApiKey', t => { - t.test('Node with ApiKey auth data in the options as string', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - apiKey: 'Zm9vOmJhcg==' - } - }) - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Node with ApiKey auth data in the options as object', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - apiKey: { id: 'foo', api_key: 'bar' } - } - }) - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Custom ApiKey authentication per request', t => { - t.plan(6) - - var first = true - function handler (req, res) { - t.match(req.headers, { - authorization: first ? 'ApiKey Zm9vOmJhcg==' : 'Basic Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://foo:bar@localhost:${port}` - }) - - client.info({}, { - headers: { - authorization: 'ApiKey Zm9vOmJhcg==' - } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - first = false - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.test('Override default ApiKey authentication per request', t => { - t.plan(6) - - var first = true - function handler (req, res) { - t.match(req.headers, { - authorization: first ? 
'hello' : 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - apiKey: 'Zm9vOmJhcg==' - } - }) - - client.info({}, { - headers: { - authorization: 'hello' - } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - first = false - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.test('ApiKey should take precedence over basic auth (in url)', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://user:pwd@localhost:${port}`, - auth: { - apiKey: 'Zm9vOmJhcg==' - } - }) - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('ApiKey should take precedence over basic auth (in opts)', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'ApiKey Zm9vOmJhcg==' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - apiKey: 'Zm9vOmJhcg==', - username: 'user', - password: 'pwd' - } - }) - - client.info((err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.end() - }) - - t.end() -}) - -test('Custom headers per request', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - 'x-foo': 'bar', - 'x-baz': 'faz' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://foo:bar@localhost:${port}` - }) - - client.info({}, { - headers: { - 'x-foo': 'bar', - 'x-baz': 'faz' - } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Client close', t => { - t.plan(2) - - class MyConnectionPool extends ConnectionPool { - empty (callback) { - t.ok('called') - super.empty(callback) - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - ConnectionPool: MyConnectionPool - }) - - client.close(() => t.pass('Closed')) -}) - -test('Client close (promise)', t => { - t.plan(2) - - class MyConnectionPool extends ConnectionPool { - empty (callback) { - t.ok('called') - super.empty(callback) - } - } - - const client = new Client({ - node: 'http://localhost:9200/', - ConnectionPool: MyConnectionPool - }) - - client.close() - .then(() => t.pass('Closed')) -}) - -test('Extend client APIs', t => { - t.test('Extend a single method', t => { - t.plan(5) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('method', ({ makeRequest, result, ConfigurationError }) => { - t.type(makeRequest, 'function') - t.true(new ConfigurationError() instanceof Error) - t.deepEqual(result, { - body: null, - statusCode: null, - headers: null, - warnings: null - }) - - return (params, options) => { - 
t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) - } - }) - - client.method( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - }) - - t.test('Create a namespace and a method', t => { - t.plan(5) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('namespace.method', ({ makeRequest, result, ConfigurationError }) => { - t.type(makeRequest, 'function') - t.true(new ConfigurationError() instanceof Error) - t.deepEqual(result, { - body: null, - statusCode: null, - headers: null, - warnings: null - }) - - return (params, options) => { - t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) - } - }) - - client.namespace.method( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - }) - - t.test('Create a namespace and multiple methods', t => { - t.plan(10) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('namespace.method1', ({ makeRequest, result, ConfigurationError }) => { - t.type(makeRequest, 'function') - t.true(new ConfigurationError() instanceof Error) - t.deepEqual(result, { - body: null, - statusCode: null, - headers: null, - warnings: null - }) - - return (params, options) => { - t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) - } - }) - - client.extend('namespace.method2', ({ makeRequest, result, ConfigurationError }) => { - t.type(makeRequest, 'function') - t.true(new ConfigurationError() instanceof Error) - t.deepEqual(result, { - body: null, - statusCode: null, - headers: null, - warnings: null - }) - - return (params, options) => { - t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) - } - }) - - client.namespace.method1( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - - client.namespace.method2( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - }) - - t.test('Cannot override an existing method', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - try { - client.extend('index', () => {}) - t.fail('Should throw') - } catch (err) { - t.is(err.message, 'The method "index" already exists') - } - }) - - t.test('Cannot override an existing namespace and method', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - try { - client.extend('indices.delete', () => {}) - t.fail('Should throw') - } catch (err) { - t.is(err.message, 'The method "delete" already exists on namespace "indices"') - } - }) - - t.test('Can override an existing method with { force: true }', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - try { - client.extend('index', { force: true }, () => t.pass('Called')) - } catch (err) { - t.fail('Should not throw') - } - }) - - t.test('Can override an existing namespace and method with { force: true }', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - try { - client.extend('indices.delete', { force: true }, () => t.pass('Called')) - } catch (err) { - t.fail('Should not throw') - } - }) - - t.test('Should call the transport.request method', t => { - t.plan(2) - - class MyTransport extends Transport { - request (params, options) { - t.deepEqual(params, { you_know: 'for search' }) - t.deepEqual(options, { winter: 'is coming' }) - } - } - - const client = new Client({ - node: 
'http://localhost:9200/', - Transport: MyTransport - }) - client.extend('method', ({ makeRequest, result, ConfigurationError }) => { - return (params, options) => makeRequest(params, options) - }) - - client.method( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - }) - - t.test('Should support callbacks', t => { - t.plan(2) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('method', ({ makeRequest, result, ConfigurationError }) => { - return (params, options, callback) => { - callback(null, { hello: 'world' }) - } - }) - - client.method( - { you_know: 'for search' }, - { winter: 'is coming' }, - (err, res) => { - t.error(err) - t.deepEqual(res, { hello: 'world' }) - } - ) - }) - - t.test('Should support promises', t => { - t.plan(1) - - const client = new Client({ node: 'http://localhost:9200/' }) - client.extend('method', ({ makeRequest, result, ConfigurationError }) => { - return (params, options) => { - return new Promise((resolve, reject) => { - resolve({ hello: 'world' }) - }) - } - }) - - client - .method( - { you_know: 'for search' }, - { winter: 'is coming' } - ) - .then(res => t.deepEqual(res, { hello: 'world' })) - .catch(err => t.fail(err)) - }) - - t.end() -}) - -test('Elastic cloud config', t => { - t.test('Basic', t => { - t.plan(5) - const client = new Client({ - cloud: { - // 'localhost$abcd$efgh' - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==', - username: 'elastic', - password: 'changeme' - } - }) - - const pool = client.connectionPool - t.ok(pool instanceof CloudConnectionPool) - t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), { - url: new URL('https://elastic:changeme@abcd.localhost/'), - id: 'https://abcd.localhost/', - headers: { - authorization: 'Basic ' + Buffer.from('elastic:changeme').toString('base64') - }, - ssl: { secureProtocol: 'TLSv1_2_method' }, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - - t.strictEqual(client.transport.compression, 'gzip') - t.strictEqual(client.transport.suggestCompression, true) - t.deepEqual(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) - }) - - t.test('Auth as separate option', t => { - t.plan(5) - const client = new Client({ - cloud: { - // 'localhost$abcd$efgh' - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==' - }, - auth: { - username: 'elastic', - password: 'changeme' - } - }) - - const pool = client.connectionPool - t.ok(pool instanceof CloudConnectionPool) - t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), { - url: new URL('https://elastic:changeme@abcd.localhost/'), - id: 'https://abcd.localhost/', - headers: { - authorization: 'Basic ' + Buffer.from('elastic:changeme').toString('base64') - }, - ssl: { secureProtocol: 'TLSv1_2_method' }, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - - t.strictEqual(client.transport.compression, 'gzip') - t.strictEqual(client.transport.suggestCompression, true) - t.deepEqual(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) - }) - - t.test('ApiKey should take precedence over basic auth', t => { - t.plan(5) - const client = new Client({ - cloud: { - // 'localhost$abcd$efgh' - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==' - }, - auth: { - username: 'elastic', - password: 'changeme', - apiKey: 'Zm9vOmJhcg==' - } - }) - - const pool = client.connectionPool - t.ok(pool instanceof CloudConnectionPool) - 
t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), { - url: new URL('https://elastic:changeme@abcd.localhost/'), - id: 'https://abcd.localhost/', - headers: { - authorization: 'ApiKey Zm9vOmJhcg==' - }, - ssl: { secureProtocol: 'TLSv1_2_method' }, - deadCount: 0, - resurrectTimeout: 0, - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) - - t.strictEqual(client.transport.compression, 'gzip') - t.strictEqual(client.transport.suggestCompression, true) - t.deepEqual(pool._ssl, { secureProtocol: 'TLSv1_2_method' }) - }) - - t.test('Override default options', t => { - t.plan(4) - const client = new Client({ - cloud: { - // 'localhost$abcd$efgh' - id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==', - username: 'elastic', - password: 'changeme' - }, - compression: false, - suggestCompression: false, - ssl: { - secureProtocol: 'TLSv1_1_method' - } - }) - - t.ok(client.connectionPool instanceof CloudConnectionPool) - t.strictEqual(client.transport.compression, false) - t.strictEqual(client.transport.suggestCompression, false) - t.deepEqual(client.connectionPool._ssl, { secureProtocol: 'TLSv1_1_method' }) - }) - - t.end() -}) - -test('Opaque Id support', t => { - t.test('No opaqueId', t => { - t.plan(3) - - function handler (req, res) { - t.strictEqual(req.headers['x-opaque-id'], undefined) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('No prefix', t => { - t.plan(3) - - function handler (req, res) { - t.strictEqual(req.headers['x-opaque-id'], 'bar') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}` - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - opaqueId: 'bar' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('With prefix', t => { - t.plan(3) - - function handler (req, res) { - t.strictEqual(req.headers['x-opaque-id'], 'foo-bar') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - opaqueIdPrefix: 'foo-' - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - opaqueId: 'bar' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.end() -}) - -test('Correctly handles the same header cased differently', t => { - t.plan(4) - - function handler (req, res) { - t.strictEqual(req.headers['authorization'], 'Basic foobar') - t.strictEqual(req.headers['foo'], 'baz') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const client = new Client({ - node: `http://localhost:${port}`, - auth: { - username: 'hello', - password: 'world' - }, - headers: { - Authorization: 'Basic foobar', - Foo: 'bar' - } - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - headers: { - foo: 'baz' - } - }, 
(err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts new file mode 100644 index 000000000..ac8203f55 --- /dev/null +++ b/test/unit/client.test.ts @@ -0,0 +1,799 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as http from 'node:http' +import { URL } from 'node:url' +import { setTimeout } from 'node:timers/promises' +import { test } from 'tap' +import FakeTimers from '@sinonjs/fake-timers' +import { Transport } from '@elastic/transport' +import { buildServer, connection } from '../utils' +import { Client, errors, SniffingTransport } from '../..' +import * as symbols from '@elastic/transport/lib/symbols' +import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool, HttpConnection } from '@elastic/transport' +import { BasicTracerProvider, InMemorySpanExporter, SimpleSpanProcessor } from '@opentelemetry/sdk-trace-base' + +let clientVersion: string = require('../../package.json').version // eslint-disable-line +if (clientVersion.includes('-')) { + clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' +} +let transportVersion: string = require('@elastic/transport/package.json').version // eslint-disable-line +if (transportVersion.includes('-')) { + transportVersion = transportVersion.slice(0, transportVersion.indexOf('-')) + 'p' +} +const nodeVersion = process.versions.node + +test('Create a client instance, single node as string', t => { + const client = new Client({ node: 'http://localhost:9200/' }) + t.ok(client.connectionPool instanceof WeightedConnectionPool) + t.equal(client.connectionPool.size, 1) + t.end() +}) + +test('Create a client instance, multi node as strings', t => { + const client = new Client({ nodes: ['http://localhost:9200/', 'http://localhost:9201/'] }) + t.ok(client.connectionPool instanceof WeightedConnectionPool) + t.equal(client.connectionPool.size, 2) + t.end() +}) + +test('Create a client instance, single node as object', t => { + const client = new Client({ + node: { + url: new URL('http://localhost:9200/') + } + }) + t.equal(client.connectionPool.size, 1) + t.end() +}) + +test('Create a client instance, multi node as object', t => { + const client = new Client({ + nodes: [{ + url: new URL('http://localhost:9200/') + }, { + url: new URL('http://localhost:9201/') + }] + }) + t.equal(client.connectionPool.size, 2) + t.end() +}) + +test('Missing node(s)', t => { + t.throws(() => new Client({}), errors.ConfigurationError) + t.end() +}) + +test('multi nodes with roles, using default node filter', async t => { + const client = new Client({ + nodes: [ + { + url: new URL('http://node1:9200/'), + roles: { master: true, data: false, ingest: false, ml: false } + }, + { + url: new URL('http://node2:9200/'), + roles: { master: true, data: true, ingest: false, ml: false } + }, + ] + }) + const conn = client.connectionPool.getConnection({ + now: Date.now() + 1000 * 60 * 3, + requestId: 1, + name: 'elasticsearch-js', + context: null + }) + + t.equal(conn?.url.hostname, 'node2') + + t.end() +}) + +test('Custom headers', t => { + const client = new Client({ + node: 'http://localhost:9200/', + headers: { foo: 'bar' } + }) + t.match(client.transport[symbols.kHeaders], { foo: 'bar' }) + t.end() +}) + +test('Custom headers should merge, not overwrite', t => { + const client = new Client({ + node: 
'http://localhost:9200/', + headers: { foo: 'bar' } + }) + t.ok(client.transport[symbols.kHeaders]['user-agent']?.startsWith('elasticsearch-js/')) + t.end() +}) + +test('Redaction options should merge, not overwrite', t => { + const client = new Client({ + node: 'http://localhost:9200/', + // @ts-expect-error + redaction: { + additionalKeys: ['foo'], + } + }) + t.equal(client.transport[symbols.kRedaction].type, 'replace') + t.match(client.transport[symbols.kRedaction].additionalKeys, ['foo']) + t.end() +}) + +test('Basic auth', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + t.match(opts.headers, { authorization: 'Basic aGVsbG86d29ybGQ=' }) + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection, + auth: { + username: 'hello', + password: 'world' + } + }) + + await client.transport.request({ method: 'GET', path: '/' }) +}) + +test('Basic auth via url', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + t.match(opts.headers, { authorization: 'Basic aGVsbG86d29ybGQ=' }) + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://hello:world@localhost:9200/', + Connection + }) + + await client.transport.request({ method: 'GET', path: '/' }) +}) + +test('ApiKey as string', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + t.match(opts.headers, { authorization: 'ApiKey foobar' }) + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection, + auth: { + apiKey: 'foobar' + } + }) + + await client.transport.request({ method: 'GET', path: '/' }) +}) + +test('ApiKey as object', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + t.match(opts.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection, + auth: { + apiKey: { + id: 'foo', + api_key: 'bar' + } + } + }) + + await client.transport.request({ method: 'GET', path: '/' }) +}) + +test('Bearer auth', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + t.match(opts.headers, { authorization: 'Bearer token' }) + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection, + auth: { + bearer: 'token' + } + }) + + await client.transport.request({ method: 'GET', path: '/' }) +}) + +test('Override authentication per request', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + t.match(opts.headers, { authorization: 'Basic foobar' }) + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection, + auth: { + username: 'hello', + password: 'world' + } + }) + + await client.transport.request( + { method: 'GET', path: '/' }, + { headers: { authorization: 'Basic foobar' } } + ) +}) + +test('Custom headers per request', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + 
t.match(opts.headers, { + foo: 'bar', + faz: 'bar' + }) + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection, + headers: { foo: 'bar' } + }) + + await client.transport.request( + { method: 'GET', path: '/' }, + { headers: { faz: 'bar' } } + ) +}) + +test('Close the client', async t => { + t.plan(1) + + class MyConnectionPool extends BaseConnectionPool { + async empty(): Promise<void> { + t.pass('called') + } + } + + const client = new Client({ + node: 'http://localhost:9200/', + ConnectionPool: MyConnectionPool + }) + + await client.close() +}) + +test('Elastic Cloud config', t => { + const client = new Client({ + cloud: { + // 'localhost$abcd$' + id: 'name:bG9jYWxob3N0JGFiY2Qk' + }, + auth: { + username: 'elastic', + password: 'changeme' + } + }) + + t.ok(client.connectionPool instanceof CloudConnectionPool) + const connection = client.connectionPool.connections.find(c => c.id === 'https://abcd.localhost/') + + t.equal(connection?.headers?.authorization, `Basic ${Buffer.from('elastic:changeme').toString('base64')}`) + t.same(connection?.tls, { secureProtocol: 'TLSv1_2_method' }) + t.equal(connection?.url.hostname, 'abcd.localhost') + t.equal(connection?.url.protocol, 'https:') + + t.test('Invalid Cloud ID will throw ConfigurationError', t => { + t.throws(() => new Client({ + cloud: { + id: 'invalidCloudIdThatIsNotBase64' + }, + auth: { + username: 'elastic', + password: 'changeme' + } + }), errors.ConfigurationError) + t.end() + }) + + t.end() +}) + +test('Override default Elastic Cloud options', t => { + const client = new Client({ + cloud: { + // 'localhost$abcd$efgh' + id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==', + }, + auth: { + username: 'elastic', + password: 'changeme' + }, + compression: false, + tls: { + secureProtocol: 'TLSv1_1_method' + } + }) + + t.ok(client.connectionPool instanceof CloudConnectionPool) + t.equal(client.transport[symbols.kCompression], false) + t.same(client.connectionPool._tls, { secureProtocol: 'TLSv1_1_method' }) + + t.end() +}) + +test('Configure opaqueIdPrefix', t => { + const client = new Client({ + node: 'http://localhost:9200/', + opaqueIdPrefix: 'foobar' + }) + + t.equal(client.transport[symbols.kOpaqueIdPrefix], 'foobar') + + t.end() +}) + +test('name as string', t => { + const client = new Client({ + node: 'http://localhost:9200/', + name: 'es-client' + }) + + t.equal(client.name, 'es-client') + + t.end() +}) + +test('name as symbol', t => { + const s = Symbol() + const client = new Client({ + node: 'http://localhost:9200/', + name: s + }) + + t.equal(client.name, s) + + t.end() +}) + +test('Meta header enabled by default', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + t.match(opts.headers, { 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}` }) + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection + }) + + await client.transport.request({ method: 'GET', path: '/' }) +}) + +test('Meta header disabled', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + t.notOk(opts.headers?.['x-elastic-client-meta']) + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/',
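+    // enableMetaHeader: false should strip the x-elastic-client-meta header from every request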
+ Connection, + enableMetaHeader: false + }) + + await client.transport.request({ method: 'GET', path: '/' }) +}) + +test('Meta header indicates when UndiciConnection is used', async t => { + t.plan(1) + + function handler(req: http.IncomingMessage, res: http.ServerResponse) { + t.equal(req.headers['x-elastic-client-meta'], `es=${clientVersion},js=${nodeVersion},t=${transportVersion},un=${nodeVersion}`) + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) + + const client = new Client({ + node: `http://localhost:${port}`, + // Connection: UndiciConnection is the default + }) + + await client.transport.request({ method: 'GET', path: '/' }) +}) + +test('Meta header indicates when HttpConnection is used', async t => { + t.plan(1) + + function handler(req: http.IncomingMessage, res: http.ServerResponse) { + t.equal(req.headers['x-elastic-client-meta'], `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion}`) + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) + + const client = new Client({ + node: `http://localhost:${port}`, + Connection: HttpConnection, + }) + + await client.transport.request({ method: 'GET', path: '/' }) +}) + +test('caFingerprint', t => { + const client = new Client({ + node: 'https://localhost:9200/', + caFingerprint: 'FO:OB:AR' + }) + + t.equal(client.connectionPool[symbols.kCaFingerprint], 'FO:OB:AR') + t.end() +}) + +test('caFingerprint can\'t be configured over http / 1', t => { + t.throws(() => new Client({ + node: 'http://localhost:9200/', + caFingerprint: 'FO:OB:AR' + }), + errors.ConfigurationError + ) + t.end() +}) + +test('caFingerprint can\'t be configured over http / 2', t => { + t.throws(() => new Client({ + nodes: ['http://localhost:9200/'], + caFingerprint: 'FO:OB:AR' + }), + errors.ConfigurationError + ) + t.end() +}) + +test('user agent is in the correct format', t => { + const client = new Client({ node: 'http://localhost:9200/' }) + const agentRaw = client.transport[symbols.kHeaders]['user-agent'] || '' + const agentSplit = agentRaw.split(/\s+/) + t.equal(agentSplit[0].split('/')[0], 'elasticsearch-js') + t.ok(/^\d+\.\d+\.\d+/.test(agentSplit[0].split('/')[1])) + t.end() +}) + +test('Ensure new client instance stores requestTimeout for each connection', t => { + const client = new Client({ + node: { url: new URL('http://localhost:9200/') }, + requestTimeout: 60000, + }) + t.equal(client.connectionPool.connections[0].timeout, 60000) + t.end() +}) + +test('No request timeout is set by default', t => { + const client = new Client({ + node: { url: new URL('http://localhost:9200/') }, + }) + t.equal(client.connectionPool.connections[0].timeout, null) + t.end() +}) + +test('Ensure new client does not time out if requestTimeout is not set', async t => { + const clock = FakeTimers.install({ toFake: ['setTimeout'] }) + t.teardown(() => clock.uninstall()) + + function handler(_req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(1000 * 60 * 60).then(() => { + t.ok('timeout ended') + res.setHeader('content-type', 'application/json') + res.end(JSON.stringify({ success: true })) + }) + clock.tick(1000 * 60 * 60) + } + + const [{ port }, server] = await buildServer(handler) + + const client = new Client({ + node: `http://localhost:${port}`, + }) + + try { + await client.transport.request({ method: 'GET', path: '/' }) + } catch (error) { + t.fail('Error should not 
be thrown', error) + } finally { + server.stop() + t.end() + } +}) + +test('Pass disablePrototypePoisoningProtection option to serializer', async t => { + let client = new Client({ + node: 'http://localhost:9200/', + disablePrototypePoisoningProtection: false + }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'error', + constructorAction: 'error' + }) + + client = new Client({ + node: 'http://localhost:9200/', + disablePrototypePoisoningProtection: true + }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'ignore', + constructorAction: 'ignore' + }) + + client = new Client({ + node: 'http://localhost:9200/', + disablePrototypePoisoningProtection: 'proto' + }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'error', + constructorAction: 'ignore' + }) + + client = new Client({ + node: 'http://localhost:9200/', + disablePrototypePoisoningProtection: 'constructor' + }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'ignore', + constructorAction: 'error' + }) +}) + +test('disablePrototypePoisoningProtection is true by default', async t => { + const client = new Client({ node: 'http://localhost:9200/' }) + t.same(client.serializer[symbols.kJsonOptions], { + protoAction: 'ignore', + constructorAction: 'ignore' + }) +}) + +test('serverless defaults', t => { + t.test('uses CloudConnectionPool by default', t => { + const client = new Client({ node: 'http://localhost:9200/', serverMode: 'serverless' }) + t.ok(client.connectionPool instanceof CloudConnectionPool) + t.equal(client.connectionPool.size, 1) + t.end() + }) + + t.test('selects one node if multiple are provided', t => { + const client = new Client({ nodes: ['http://localhost:9200/', 'http://localhost:9201/'], serverMode: 'serverless' }) + t.equal(client.connectionPool.size, 1) + t.end() + }) + + t.test('uses TLSv1_2_method by default', t => { + const client = new Client({ + node: 'https://localhost:9200/', + serverMode: 'serverless', + auth: { + username: 'elastic', + password: 'changeme' + } + }) + + const connection = client.connectionPool.connections.find(c => c.id === 'https://localhost:9200/') + + t.equal(connection?.headers?.authorization, `Basic ${Buffer.from('elastic:changeme').toString('base64')}`) + t.same(connection?.tls, { secureProtocol: 'TLSv1_2_method' }) + t.equal(connection?.url.hostname, 'localhost') + t.equal(connection?.url.protocol, 'https:') + + t.end() + }) + + t.test('elastic-api-version header exists on all requests', async t => { + t.plan(1) + + const Connection = connection.buildMockConnection({ + onRequest(opts) { + t.equal(opts.headers?.['elastic-api-version'], '2023-10-31') + return { + statusCode: 200, + body: { hello: 'world' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + serverMode: 'serverless', + Connection, + }) + + await client.transport.request({ method: 'GET', path: '/' }) + }) + + t.test('sniffing transport not used', t => { + const client = new Client({ node: 'http://localhost:9200/', serverMode: 'serverless' }) + t.ok(!(client.transport instanceof SniffingTransport)) + t.end() + }) + + t.end() +}) + +test('custom transport: class', async t => { + t.plan(3) + + class MyTransport extends Transport { + async request(params, options): Promise<any> { + t.ok(true, 'custom Transport request function should be called') + return super.request(params, options) + } + } + + function handler(_req:
http.IncomingMessage, res: http.ServerResponse) { + t.ok(true, 'handler should be called') + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) + + const client = new Client({ + node: `http://localhost:${port}`, + Transport: MyTransport + }) + + t.ok(client.transport instanceof MyTransport, 'Custom transport should be used') + + client.transport.request({ method: 'GET', path: '/' }) +}) + +test('custom transport: disable otel via options', async t => { + const exporter = new InMemorySpanExporter() + const processor = new SimpleSpanProcessor(exporter) + const provider = new BasicTracerProvider({ + spanProcessors: [processor] + }) + provider.register() + + t.after(async () => { + await provider.forceFlush() + exporter.reset() + await provider.shutdown() + }) + + class MyTransport extends Transport { + async request(params, options = {}): Promise<any> { + // @ts-expect-error + options.openTelemetry = { enabled: false } + return super.request(params, options) + } + } + + function handler(_req: http.IncomingMessage, res: http.ServerResponse) { + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) + + const client = new Client({ + node: `http://localhost:${port}`, + Transport: MyTransport + }) + + await client.transport.request({ + path: '/hello', + method: 'GET', + meta: { name: 'hello' }, + }) + + t.equal(exporter.getFinishedSpans().length, 0) + t.end() +}) + +test('custom transport: disable otel via env var', async t => { + const exporter = new InMemorySpanExporter() + const processor = new SimpleSpanProcessor(exporter) + const provider = new BasicTracerProvider({ + spanProcessors: [processor] + }) + provider.register() + + t.after(async () => { + await provider.forceFlush() + exporter.reset() + await provider.shutdown() + }) + + function handler(_req: http.IncomingMessage, res: http.ServerResponse) { + res.end('ok') + } + + const [{ port }, server] = await buildServer(handler) + t.after(() => server.stop()) + + const client = new Client({ + node: `http://localhost:${port}`, + }) + + process.env.OTEL_ELASTICSEARCH_ENABLED = 'false' + + await client.transport.request({ + path: '/hello', + method: 'GET', + meta: { name: 'hello' }, + }) + + t.equal(exporter.getFinishedSpans().length, 0) + t.end() +}) diff --git a/test/unit/cloud-connection-pool.test.js b/test/unit/cloud-connection-pool.test.js deleted file mode 100644 index e0cb1a499..000000000 --- a/test/unit/cloud-connection-pool.test.js +++ /dev/null @@ -1,33 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { CloudConnectionPool } = require('../../lib/pool') -const Connection = require('../../lib/Connection') - -test('Should expose a cloudConnection property', t => { - const pool = new CloudConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - t.ok(pool.cloudConnection instanceof Connection) - t.end() -}) - -test('Get connection should always return cloudConnection', t => { - const pool = new CloudConnectionPool({ Connection }) - const conn = pool.addConnection('http://localhost:9200/') - t.deepEqual(pool.getConnection(), conn) - t.end() -}) - -test('pool.empty should reset cloudConnection', t => { - const pool = new CloudConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - t.ok(pool.cloudConnection instanceof Connection) - pool.empty(() => { - t.strictEqual(pool.cloudConnection, null) - t.end() - }) -}) diff --git a/test/unit/connection-pool.test.js b/test/unit/connection-pool.test.js deleted file mode 100644 index b8c412d16..000000000 --- a/test/unit/connection-pool.test.js +++ /dev/null @@ -1,786 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const ConnectionPool = require('../../lib/pool/ConnectionPool') -const Connection = require('../../lib/Connection') -const { defaultNodeFilter, roundRobinSelector } = require('../../lib/Transport').internals -const { connection: { MockConnection, MockConnectionTimeout } } = require('../utils') - -test('API', t => { - t.test('addConnection', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - t.ok(pool.connections.find(c => c.id === href) instanceof Connection) - t.strictEqual(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE) - t.deepEqual(pool.dead, []) - t.end() - }) - - t.test('addConnection should throw with two connections with the same id', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - try { - pool.addConnection(href) - t.fail('Should throw') - } catch (err) { - t.is(err.message, `Connection with id '${href}' is already present`) - } - t.end() - }) - - t.test('addConnection should handle not-friendly url parameters for user and password', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://us/"er:p@assword@localhost:9200/' - pool.addConnection(href) - const conn = pool.getConnection() - t.strictEqual(conn.url.username, 'us%22er') - t.strictEqual(conn.url.password, 'p%40assword') - t.match(conn.headers, { - authorization: 'Basic ' + Buffer.from('us"er:p@assword').toString('base64') - }) - t.end() - }) - - t.test('markDead', t => { - const pool = new ConnectionPool({ Connection, sniffEnabled: true }) - const href = 'http://localhost:9200/' - var connection = pool.addConnection(href) - pool.markDead(connection) - connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.deadCount, 1) - t.true(connection.resurrectTimeout > 0) - t.deepEqual(pool.dead, [href]) - t.end() - }) - - t.test('markDead should sort the dead queue by deadTimeout', t 
=> { - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/1' - const href2 = 'http://localhost:9200/2' - const conn1 = pool.addConnection(href1) - const conn2 = pool.addConnection(href2) - pool.markDead(conn2) - setTimeout(() => { - pool.markDead(conn1) - t.deepEqual(pool.dead, [href2, href1]) - t.end() - }, 10) - }) - - t.test('markDead should ignore connections that no longer exists', t => { - const pool = new ConnectionPool({ Connection, sniffEnabled: true }) - pool.addConnection('http://localhost:9200/') - pool.markDead({ id: 'foo-bar' }) - t.deepEqual(pool.dead, []) - t.end() - }) - - t.test('markAlive', t => { - const pool = new ConnectionPool({ Connection, sniffEnabled: true }) - const href = 'http://localhost:9200/' - var connection = pool.addConnection(href) - pool.markDead(connection) - pool.markAlive(connection) - connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.deadCount, 0) - t.strictEqual(connection.resurrectTimeout, 0) - t.strictEqual(connection.status, Connection.statuses.ALIVE) - t.deepEqual(pool.dead, []) - t.end() - }) - - t.test('resurrect', t => { - t.test('ping strategy', t => { - t.test('alive', t => { - const pool = new ConnectionPool({ - resurrectStrategy: 'ping', - pingTimeout: 3000, - Connection: MockConnection, - sniffEnabled: true - }) - const href = 'http://localhost:9200/' - var connection = pool.addConnection(href) - pool.markDead(connection) - const opts = { - now: Date.now() + 1000 * 60 * 3, - requestId: 1, - name: 'elasticsearch-js' - } - pool.resurrect(opts, (isAlive, connection) => { - t.true(isAlive) - connection = pool.connections.find(c => c.id === connection.id) - t.strictEqual(connection.deadCount, 0) - t.strictEqual(connection.resurrectTimeout, 0) - t.strictEqual(connection.status, Connection.statuses.ALIVE) - t.deepEqual(pool.dead, []) - t.end() - }) - }) - - t.test('dead', t => { - const pool = new ConnectionPool({ - resurrectStrategy: 'ping', - pingTimeout: 3000, - Connection: MockConnectionTimeout, - sniffEnabled: true - }) - const href = 'http://localhost:9200/' - var connection = pool.addConnection(href) - pool.markDead(connection) - const opts = { - now: Date.now() + 1000 * 60 * 3, - requestId: 1, - name: 'elasticsearch-js' - } - pool.resurrect(opts, (isAlive, connection) => { - t.false(isAlive) - connection = pool.connections.find(c => c.id === connection.id) - t.strictEqual(connection.deadCount, 2) - t.true(connection.resurrectTimeout > 0) - t.strictEqual(connection.status, Connection.statuses.DEAD) - t.deepEqual(pool.dead, [href]) - t.end() - }) - }) - - t.end() - }) - - t.test('optimistic strategy', t => { - const pool = new ConnectionPool({ - resurrectStrategy: 'optimistic', - Connection, - sniffEnabled: true - }) - const href = 'http://localhost:9200/' - var connection = pool.addConnection(href) - pool.markDead(connection) - const opts = { - now: Date.now() + 1000 * 60 * 3, - requestId: 1, - name: 'elasticsearch-js' - } - pool.resurrect(opts, (isAlive, connection) => { - t.true(isAlive) - connection = pool.connections.find(c => c.id === connection.id) - t.strictEqual(connection.deadCount, 1) - t.true(connection.resurrectTimeout > 0) - t.strictEqual(connection.status, Connection.statuses.ALIVE) - t.deepEqual(pool.dead, []) - t.end() - }) - }) - - t.test('none strategy', t => { - const pool = new ConnectionPool({ - resurrectStrategy: 'none', - Connection, - sniffEnabled: true - }) - const href = 
'http://localhost:9200/' - var connection = pool.addConnection(href) - pool.markDead(connection) - const opts = { - now: Date.now() + 1000 * 60 * 3, - requestId: 1, - name: 'elasticsearch-js' - } - pool.resurrect(opts, (isAlive, connection) => { - t.ok(isAlive === null) - t.ok(connection === null) - connection = pool.connections.find(c => c.id === href) - t.strictEqual(connection.deadCount, 1) - t.true(connection.resurrectTimeout > 0) - t.strictEqual(connection.status, Connection.statuses.DEAD) - t.deepEqual(pool.dead, [href]) - t.end() - }) - }) - - t.end() - }) - - t.test('getConnection', t => { - t.test('Should return a connection', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - pool.addConnection(href) - t.ok(pool.getConnection() instanceof Connection) - t.end() - }) - - t.test('filter option', t => { - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/' - const href2 = 'http://localhost:9200/other' - pool.addConnection([href1, href2]) - - const filter = node => node.id === href1 - t.strictEqual(pool.getConnection({ filter }).id, href1) - t.end() - }) - - t.test('filter should get Connection objects', t => { - t.plan(2) - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/' - const href2 = 'http://localhost:9200/other' - pool.addConnection([href1, href2]) - - const filter = node => { - t.ok(node instanceof Connection) - return true - } - pool.getConnection({ filter }) - }) - - t.test('filter should get alive connections', t => { - t.plan(2) - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/' - const href2 = 'http://localhost:9200/other' - const conn = pool.addConnection(href1) - pool.addConnection([href2, `${href2}/stuff`]) - pool.markDead(conn) - - const filter = node => { - t.strictEqual(node.status, Connection.statuses.ALIVE) - return true - } - pool.getConnection({ filter }) - }) - - t.test('If all connections are marked as dead, getConnection should return a dead connection', t => { - const pool = new ConnectionPool({ Connection }) - const href1 = 'http://localhost:9200/' - const href2 = 'http://localhost:9200/other' - const conn1 = pool.addConnection(href1) - const conn2 = pool.addConnection(href2) - pool.markDead(conn1) - pool.markDead(conn2) - const conn = pool.getConnection() - t.ok(conn instanceof Connection) - t.is(conn.status, 'dead') - t.end() - }) - - t.end() - }) - - t.test('removeConnection', t => { - const pool = new ConnectionPool({ Connection }) - const href = 'http://localhost:9200/' - var connection = pool.addConnection(href) - t.ok(pool.getConnection() instanceof Connection) - pool.removeConnection(connection) - t.strictEqual(pool.getConnection(), null) - t.end() - }) - - t.test('empty', t => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - pool.addConnection('http://localhost:9201/') - pool.empty(() => { - t.strictEqual(pool.size, 0) - t.deepEqual(pool.dead, []) - t.end() - }) - }) - - t.test('urlToHost', t => { - const pool = new ConnectionPool({ Connection }) - const url = 'http://localhost:9200/' - t.deepEqual( - pool.urlToHost(url), - { url: new URL(url) } - ) - t.end() - }) - - t.test('nodesToHost', t => { - t.test('publish_address as ip address (IPv4)', t => { - const pool = new ConnectionPool({ Connection }) - const 
nodes = { - a1: { - http: { - publish_address: '127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: '127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '127.0.0.1:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '127.0.0.1:9201') - t.end() - }) - - t.test('publish_address as ip address (IPv6)', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: '[::1]:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: '[::1]:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://[::1]:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://[::1]:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '[::1]:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '[::1]:9201') - t.end() - }) - - t.test('publish_address as host/ip (IPv4)', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') - t.end() - }) - - t.test('publish_address as host/ip (IPv6)', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/[::1]:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: { - publish_address: 'example.com/[::1]:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }]) - - t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201') - t.end() - }) - - t.test('Should use the configure protocol', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com/127.0.0.1:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - a2: { - http: 
{ - publish_address: 'example.com/127.0.0.1:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - - t.strictEqual(pool.nodesToHost(nodes, 'https:')[0].url.protocol, 'https:') - t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.protocol, 'http:') - t.end() - }) - - t.test('Should map roles', t => { - const pool = new ConnectionPool({ Connection }) - const nodes = { - a1: { - http: { - publish_address: 'example.com:9200' - }, - roles: ['master', 'data', 'ingest', 'ml'] - }, - a2: { - http: { - publish_address: 'example.com:9201' - }, - roles: [] - } - } - t.same(pool.nodesToHost(nodes, 'http:'), [{ - url: new URL('http://example.com:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true, - ml: true - } - }, { - url: new URL('http://example.com:9201/'), - id: 'a2', - roles: { - master: false, - data: false, - ingest: false, - ml: false - } - }]) - - t.end() - }) - - t.end() - }) - - t.test('update', t => { - t.test('Should not update existing connections', t => { - t.plan(2) - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true - } - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true - } - }]) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: null - }]) - - t.ok(pool.connections.find(c => c.id === 'a1').roles !== null) - t.ok(pool.connections.find(c => c.id === 'a2').roles !== null) - }) - - t.test('Should not update existing connections (mark alive)', t => { - t.plan(5) - class CustomConnectionPool extends ConnectionPool { - markAlive (connection) { - t.ok('called') - super.markAlive(connection) - } - } - const pool = new CustomConnectionPool({ Connection }) - const conn1 = pool.addConnection({ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true - } - }) - - const conn2 = pool.addConnection({ - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: { - master: true, - data: true, - ingest: true - } - }) - - pool.markDead(conn1) - pool.markDead(conn2) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: null - }]) - - t.ok(pool.connections.find(c => c.id === 'a1').roles !== null) - t.ok(pool.connections.find(c => c.id === 'a2').roles !== null) - }) - - t.test('Should not update existing connections (same url, different id)', t => { - t.plan(3) - class CustomConnectionPool extends ConnectionPool { - markAlive (connection) { - t.ok('called') - super.markAlive(connection) - } - } - const pool = new CustomConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'http://127.0.0.1:9200/', - roles: { - master: true, - data: true, - ingest: true - } - }]) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: true - }]) - - // roles will never be updated, we only use it to do - // a dummy check to see if the connection has been updated - t.deepEqual(pool.connections.find(c => c.id === 'a1').roles, { - master: true, - data: true, - ingest: true, - ml: false - }) - t.strictEqual(pool.connections.find(c => c.id === 
'http://127.0.0.1:9200/'), undefined) - }) - - t.test('Add a new connection', t => { - t.plan(2) - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: { - master: true, - data: true, - ingest: true - } - }) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a2', - roles: null - }]) - - t.ok(pool.connections.find(c => c.id === 'a1').roles !== null) - t.ok(pool.connections.find(c => c.id === 'a2')) - }) - - t.test('Remove old connections', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a2', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a3', - roles: null - }]) - - t.false(pool.connections.find(c => c.id === 'a1')) - t.true(pool.connections.find(c => c.id === 'a2')) - t.true(pool.connections.find(c => c.id === 'a3')) - }) - - t.test('Remove old connections (markDead)', t => { - t.plan(5) - const pool = new ConnectionPool({ Connection, sniffEnabled: true }) - const conn = pool.addConnection({ - url: new URL('http://127.0.0.1:9200/'), - id: 'a1', - roles: null - }) - - pool.markDead(conn) - t.deepEqual(pool.dead, ['a1']) - - pool.update([{ - url: new URL('http://127.0.0.1:9200/'), - id: 'a2', - roles: null - }, { - url: new URL('http://127.0.0.1:9201/'), - id: 'a3', - roles: null - }]) - - t.deepEqual(pool.dead, []) - t.false(pool.connections.find(c => c.id === 'a1')) - t.true(pool.connections.find(c => c.id === 'a2')) - t.true(pool.connections.find(c => c.id === 'a3')) - }) - - t.end() - }) - - t.end() -}) - -test('Node selector', t => { - t.test('round-robin', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - t.true(pool.getConnection({ selector: roundRobinSelector() }) instanceof Connection) - }) - - t.test('random', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - t.true(pool.getConnection({ selector: roundRobinSelector() }) instanceof Connection) - }) - - t.end() -}) - -test('Node filter', t => { - t.test('default', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ url: new URL('http://localhost:9200/') }) - t.true(pool.getConnection({ filter: defaultNodeFilter }) instanceof Connection) - }) - - t.test('Should filter master only nodes', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - roles: { - master: true, - data: false, - ingest: false, - ml: false - } - }) - t.strictEqual(pool.getConnection({ filter: defaultNodeFilter }), null) - }) - - t.end() -}) diff --git a/test/unit/connection.test.js b/test/unit/connection.test.js deleted file mode 100644 index 68ffebbda..000000000 --- a/test/unit/connection.test.js +++ /dev/null @@ -1,902 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { inspect } = require('util') -const { createGzip, createDeflate } = require('zlib') -const { URL } = require('url') -const { Agent } = require('http') -const intoStream = require('into-stream') -const { buildServer } = require('../utils') -const Connection = require('../../lib/Connection') -const { TimeoutError, ConfigurationError, RequestAbortedError } = require('../../lib/errors') - -test('Basic (http)', t => { - t.plan(4) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'keep-alive' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - connection: 'keep-alive' - }) - - var payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.strictEqual(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Basic (https)', t => { - t.plan(4) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'keep-alive' - }) - res.end('ok') - } - - buildServer(handler, { secure: true }, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`https://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - connection: 'keep-alive' - }) - - var payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.strictEqual(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Basic (https with ssl agent)', t => { - t.plan(4) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'keep-alive' - }) - res.end('ok') - } - - buildServer(handler, { secure: true }, ({ port, key, cert }, server) => { - const connection = new Connection({ - url: new URL(`https://localhost:${port}`), - ssl: { key, cert } - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - connection: 'keep-alive' - }) - - var payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.strictEqual(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Custom http agent', t => { - t.plan(5) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'keep-alive' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const agent = new Agent({ - keepAlive: true, - keepAliveMsecs: 1000, - maxSockets: 256, - maxFreeSockets: 256 - }) - agent.custom = true - const connection = new Connection({ - url: new URL(`http://localhost:${port}`), - agent: () => agent - }) - t.true(connection.agent.custom) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - connection: 'keep-alive' - }) - - var payload = '' - res.setEncoding('utf8') 
- res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.strictEqual(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Disable keep alive', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - connection: 'close' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`), - agent: { keepAlive: false } - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - connection: 'close' - }) - server.stop() - }) - }) -}) - -test('Timeout support', t => { - t.plan(1) - - function handler (req, res) { - setTimeout( - () => res.end('ok'), - 1000 - ) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - timeout: 500 - }, (err, res) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) -}) - -test('querystring', t => { - t.test('Should concatenate the querystring', t => { - t.plan(2) - - function handler (req, res) { - t.strictEqual(req.url, '/hello?hello=world&you_know=for%20search') - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - querystring: 'hello=world&you_know=for%20search' - }, (err, res) => { - t.error(err) - server.stop() - }) - }) - }) - - t.test('If the querystring is null should not do anything', t => { - t.plan(2) - - function handler (req, res) { - t.strictEqual(req.url, '/hello') - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - querystring: null - }, (err, res) => { - t.error(err) - server.stop() - }) - }) - }) - - t.end() -}) - -test('Body request', t => { - t.plan(2) - - function handler (req, res) { - var payload = '' - req.setEncoding('utf8') - req.on('data', chunk => { payload += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.strictEqual(payload, 'hello') - res.end('ok') - }) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'POST', - body: 'hello' - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Send body as buffer', t => { - t.plan(2) - - function handler (req, res) { - var payload = '' - req.setEncoding('utf8') - req.on('data', chunk => { payload += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.strictEqual(payload, 'hello') - res.end('ok') - }) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'POST', - body: Buffer.from('hello') - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Send body as stream', t => { - t.plan(2) - - function handler (req, res) { - var payload = '' - req.setEncoding('utf8') - req.on('data', chunk => { payload += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', 
() => { - t.strictEqual(payload, 'hello') - res.end('ok') - }) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'POST', - body: intoStream('hello') - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Should handle compression', t => { - t.test('gzip', t => { - t.plan(3) - - function handler (req, res) { - res.writeHead(200, { - 'Content-Type': 'application/json;utf=8', - 'Content-Encoding': 'gzip' - }) - intoStream(JSON.stringify({ hello: 'world' })) - .pipe(createGzip()) - .pipe(res) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - 'content-type': 'application/json;utf=8', - 'content-encoding': 'gzip' - }) - - var payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.deepEqual(JSON.parse(payload), { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.test('deflate', t => { - t.plan(3) - - function handler (req, res) { - res.writeHead(200, { - 'Content-Type': 'application/json;utf=8', - 'Content-Encoding': 'deflate' - }) - intoStream(JSON.stringify({ hello: 'world' })) - .pipe(createDeflate()) - .pipe(res) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.error(err) - - t.match(res.headers, { - 'content-type': 'application/json;utf=8', - 'content-encoding': 'deflate' - }) - - var payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.deepEqual(JSON.parse(payload), { hello: 'world' }) - server.stop() - }) - }) - }) - }) - - t.end() -}) - -test('Should not close a connection if there are open requests', t => { - t.plan(4) - - function handler (req, res) { - setTimeout(() => res.end('ok'), 1000) - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - - setTimeout(() => { - t.strictEqual(connection._openRequests, 1) - connection.close() - }, 500) - - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.error(err) - t.strictEqual(connection._openRequests, 0) - - var payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.strictEqual(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Url with auth', t => { - t.plan(2) - - function handler (req, res) { - t.match(req.headers, { - authorization: 'Basic Zm9vOmJhcg==' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://foo:bar@localhost:${port}`), - auth: { username: 'foo', password: 'bar' } - }) - connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Url with querystring', t => { - t.plan(2) - - function handler (req, res) { - t.strictEqual(req.url, '/hello?foo=bar&baz=faz') - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { 
- const connection = new Connection({ - url: new URL(`http://localhost:${port}?foo=bar`) - }) - connection.request({ - path: '/hello', - method: 'GET', - querystring: 'baz=faz' - }, (err, res) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Custom headers for connection', t => { - t.plan(3) - - function handler (req, res) { - t.match(req.headers, { - 'x-custom-test': 'true', - 'x-foo': 'bar' - }) - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`), - headers: { 'x-foo': 'bar' } - }) - connection.request({ - path: '/hello', - method: 'GET', - headers: { - 'X-Custom-Test': true - } - }, (err, res) => { - t.error(err) - // should not update the default - t.deepEqual(connection.headers, { 'x-foo': 'bar' }) - server.stop() - }) - }) -}) - -// TODO: add a check that the response is not decompressed -test('asStream set to true', t => { - t.plan(2) - - function handler (req, res) { - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - connection.request({ - path: '/hello', - method: 'GET', - asStream: true - }, (err, res) => { - t.error(err) - - var payload = '' - res.setEncoding('utf8') - res.on('data', chunk => { payload += chunk }) - res.on('error', err => t.fail(err)) - res.on('end', () => { - t.strictEqual(payload, 'ok') - server.stop() - }) - }) - }) -}) - -test('Connection id should not contain credentials', t => { - const connection = new Connection({ - url: new URL('http://user:password@localhost:9200/') - }) - t.strictEqual(connection.id, 'http://localhost:9200/') - t.end() -}) - -test('Should throw if the protocol is not http or https', t => { - try { - new Connection({ // eslint-disable-line - url: new URL('nope://nope') - }) - t.fail('Should throw') - } catch (err) { - t.ok(err instanceof ConfigurationError) - t.is(err.message, 'Invalid protocol: \'nope:\'') - } - t.end() -}) - -// https://github.com/nodejs/node/commit/b961d9fd83 -test('Should disallow two-byte characters in URL path', t => { - t.plan(1) - - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - connection.request({ - path: '/thisisinvalid\uffe2', - method: 'GET' - }, (err, res) => { - t.strictEqual( - err.message, - 'ERR_UNESCAPED_CHARACTERS: /thisisinvalid\uffe2' - ) - }) -}) - -test('setRole', t => { - t.test('Update the value of a role', t => { - t.plan(2) - - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - t.deepEqual(connection.roles, { - master: true, - data: true, - ingest: true, - ml: false - }) - - connection.setRole('master', false) - - t.deepEqual(connection.roles, { - master: false, - data: true, - ingest: true, - ml: false - }) - }) - - t.test('Invalid role', t => { - t.plan(2) - - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - try { - connection.setRole('car', true) - t.fail('Shoud throw') - } catch (err) { - t.true(err instanceof ConfigurationError) - t.is(err.message, 'Unsupported role: \'car\'') - } - }) - - t.test('Invalid value', t => { - t.plan(2) - - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - try { - connection.setRole('master', 1) - t.fail('Shoud throw') - } catch (err) { - t.true(err instanceof ConfigurationError) - t.is(err.message, 'enabled should be a boolean') - } - }) - - t.end() 
-}) - -test('Util.inspect Connection class should hide agent, ssl and auth', t => { - t.plan(1) - - const connection = new Connection({ - url: new URL('http://user:password@localhost:9200/'), - id: 'node-id', - headers: { foo: 'bar' } - }) - - // Removes spaces and new lines because - // utils.inspect is handled differently - // between major versions of Node.js - function cleanStr (str) { - return str - .replace(/\s/g, '') - .replace(/(\r\n|\n|\r)/gm, '') - } - - t.strictEqual(cleanStr(inspect(connection)), cleanStr(`{ url: 'http://localhost:9200/', - id: 'node-id', - headers: { foo: 'bar' }, - deadCount: 0, - resurrectTimeout: 0, - _openRequests: 0, - status: 'alive', - roles: { master: true, data: true, ingest: true, ml: false }}`) - ) -}) - -test('connection.toJSON should hide agent, ssl and auth', t => { - t.plan(1) - - const connection = new Connection({ - url: new URL('http://user:password@localhost:9200/'), - id: 'node-id', - headers: { foo: 'bar' } - }) - - t.deepEqual(connection.toJSON(), { - url: 'http://localhost:9200/', - id: 'node-id', - headers: { - foo: 'bar' - }, - deadCount: 0, - resurrectTimeout: 0, - _openRequests: 0, - status: 'alive', - roles: { - master: true, - data: true, - ingest: true, - ml: false - } - }) -}) - -// https://github.com/elastic/elasticsearch-js/issues/843 -test('Port handling', t => { - t.test('http 80', t => { - const connection = new Connection({ - url: new URL('http://localhost/') - }) - - t.strictEqual( - connection.buildRequestObject({}).port, - undefined - ) - - t.end() - }) - - t.test('https 443', t => { - const connection = new Connection({ - url: new URL('https://localhost/') - }) - - t.strictEqual( - connection.buildRequestObject({}).port, - undefined - ) - - t.end() - }) - - t.end() -}) - -test('Authorization header', t => { - t.test('None', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - t.deepEqual(connection.headers, {}) - - t.end() - }) - - t.test('Basic', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/'), - auth: { username: 'foo', password: 'bar' } - }) - - t.deepEqual(connection.headers, { authorization: 'Basic Zm9vOmJhcg==' }) - - t.end() - }) - - t.test('ApiKey (string)', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/'), - auth: { apiKey: 'Zm9vOmJhcg==' } - }) - - t.deepEqual(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) - - t.end() - }) - - t.test('ApiKey (object)', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/'), - auth: { apiKey: { id: 'foo', api_key: 'bar' } } - }) - - t.deepEqual(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' }) - - t.end() - }) - - t.end() -}) - -test('Should not add agent and ssl to the serialized connection', t => { - const connection = new Connection({ - url: new URL('http://localhost:9200/') - }) - - t.strictEqual( - JSON.stringify(connection), - '{"url":"http://localhost:9200/","id":"http://localhost:9200/","headers":{},"deadCount":0,"resurrectTimeout":0,"_openRequests":0,"status":"alive","roles":{"master":true,"data":true,"ingest":true,"ml":false}}' - ) - - t.end() -}) - -test('Abort a request syncronously', t => { - t.plan(1) - - function handler (req, res) { - t.fail('The server should not be contacted') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: 
new URL(`http://localhost:${port}`) - }) - const request = connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.ok(err instanceof RequestAbortedError) - server.stop() - }) - request.abort() - }) -}) - -test('Abort a request asyncronously', t => { - t.plan(1) - - function handler (req, res) { - // might be called or not - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const connection = new Connection({ - url: new URL(`http://localhost:${port}`) - }) - const request = connection.request({ - path: '/hello', - method: 'GET' - }, (err, res) => { - t.ok(err instanceof RequestAbortedError) - server.stop() - }) - setImmediate(() => request.abort()) - }) -}) diff --git a/test/unit/errors.test.js b/test/unit/errors.test.js deleted file mode 100644 index 972cf8479..000000000 --- a/test/unit/errors.test.js +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -/* eslint no-prototype-builtins: 0 */ - -const { test } = require('tap') -const { errors } = require('../../index') - -test('ElasticsearchClientError', t => { - const err = new errors.ElasticsearchClientError() - t.true(err instanceof Error) - t.end() -}) - -test('TimeoutError', t => { - const err = new errors.TimeoutError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) - t.end() -}) - -test('ConnectionError', t => { - const err = new errors.ConnectionError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) - t.end() -}) - -test('NoLivingConnectionsError', t => { - const err = new errors.NoLivingConnectionsError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) - t.end() -}) - -test('SerializationError', t => { - const err = new errors.SerializationError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.false(err.hasOwnProperty('meta')) - t.true(err.hasOwnProperty('data')) - t.end() -}) - -test('DeserializationError', t => { - const err = new errors.DeserializationError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.false(err.hasOwnProperty('meta')) - t.true(err.hasOwnProperty('data')) - t.end() -}) - -test('ConfigurationError', t => { - const err = new errors.ConfigurationError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.false(err.hasOwnProperty('meta')) - t.end() -}) - -test('ResponseError', t => { - const meta = { - body: 1, - statusCode: 1, - headers: 1 - } - const err = new errors.ResponseError(meta) - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) - t.ok(err.body) - t.ok(err.statusCode) - t.ok(err.headers) - t.end() -}) - -test('RequestAbortedError', t => { - const err = new errors.RequestAbortedError() - t.true(err instanceof Error) - t.true(err instanceof errors.ElasticsearchClientError) - t.true(err.hasOwnProperty('meta')) - t.end() -}) diff --git a/test/unit/events.test.js b/test/unit/events.test.js deleted file mode 100644 index de55a1d02..000000000 --- a/test/unit/events.test.js +++ /dev/null @@ -1,153 +0,0 @@ -// Licensed to Elasticsearch B.V 
under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { Client, events } = require('../../index') -const { TimeoutError } = require('../../lib/errors') -const { connection: { MockConnection, MockConnectionTimeout } } = require('../utils') - -test('Should emit a request event when a request is performed', t => { - t.plan(3) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on(events.REQUEST, (err, request) => { - t.error(err) - t.match(request, { - body: null, - statusCode: null, - headers: null, - warnings: null, - meta: { - context: null, - name: 'elasticsearch-js', - request: { - params: { - method: 'GET', - path: '/test/_search', - body: '', - querystring: 'q=foo%3Abar' - }, - options: {}, - id: 1 - }, - connection: { - id: 'http://localhost:9200/' - }, - attempts: 0, - aborted: false - } - }) - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, result) => { - t.error(err) - }) -}) - -test('Should emit a response event in case of a successful response', t => { - t.plan(3) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - client.on(events.RESPONSE, (err, request) => { - t.error(err) - t.match(request, { - body: { hello: 'world' }, - statusCode: 200, - headers: { - 'content-type': 'application/json;utf=8', - connection: 'keep-alive' - }, - warnings: null, - meta: { - context: null, - name: 'elasticsearch-js', - request: { - params: { - method: 'GET', - path: '/test/_search', - body: '', - querystring: 'q=foo%3Abar' - }, - options: {}, - id: 1 - }, - connection: { - id: 'http://localhost:9200/' - }, - attempts: 0, - aborted: false - } - }) - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, (err, result) => { - t.error(err) - }) -}) - -test('Should emit a response event with the error set', t => { - t.plan(3) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnectionTimeout, - maxRetries: 0 - }) - - client.on(events.RESPONSE, (err, request) => { - t.ok(err instanceof TimeoutError) - t.match(request, { - body: null, - statusCode: null, - headers: null, - warnings: null, - meta: { - context: null, - name: 'elasticsearch-js', - request: { - params: { - method: 'GET', - path: '/test/_search', - body: '', - querystring: 'q=foo%3Abar' - }, - options: { - requestTimeout: 500 - }, - id: 1 - }, - connection: { - id: 'http://localhost:9200/' - }, - attempts: 0, - aborted: false - } - }) - }) - - client.search({ - index: 'test', - q: 'foo:bar' - }, { - requestTimeout: 500 - }, (err, result) => { - t.ok(err instanceof TimeoutError) - }) -}) diff --git a/test/unit/helpers/bulk.test.js b/test/unit/helpers/bulk.test.js deleted file mode 100644 index 0eb8d94fe..000000000 --- a/test/unit/helpers/bulk.test.js +++ /dev/null @@ -1,1105 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
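The deleted errors.test.js and events.test.js suites above pin down two small but load-bearing surfaces: every transport error subclasses errors.ElasticsearchClientError (carrying meta or data depending on the type), and the client emits events.REQUEST / events.RESPONSE around each call. A minimal sketch of that surface, using only names visible in the deleted tests and assuming the published package name '@elastic/elasticsearch' for a self-contained import:

import { Client, events, errors } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Fired once per request/response cycle; on failure `err` is set and the
// result object still carries the request metadata asserted in the deleted
// suites (meta.attempts, meta.aborted, meta.request.params, ...).
client.on(events.RESPONSE, (err, result) => {
  if (err instanceof errors.TimeoutError) {
    console.error('request timed out after', result?.meta.attempts, 'attempts')
  }
})
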
-// See the LICENSE file in the project root for more information - -'use strict' - -const { createReadStream } = require('fs') -const { join } = require('path') -const split = require('split2') -const FakeTimers = require('@sinonjs/fake-timers') -const semver = require('semver') -const { test } = require('tap') -const { Client, errors } = require('../../../') -const { buildServer, connection } = require('../../utils') - -const dataset = [ - { user: 'jon', age: 23 }, - { user: 'arya', age: 18 }, - { user: 'tyrion', age: 39 } -] - -test('bulk index', t => { - t.test('datasource as array', t => { - t.test('Should perform a bulk request', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - - t.test('Should perform a bulk request (with concurrency)', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 3, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - - t.test('Should perform a bulk request (high flush size)', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - t.strictEqual(params.body.split('\n').filter(Boolean).length, 6) - return { body: { errors: false, items: new Array(3).fill({}) } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 5000000, - concurrency: 1, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - 
failed: 0, - aborted: false - }) - }) - - t.test('refreshOnCompletion', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - if (params.method === 'GET') { - t.strictEqual(params.path, '/_all/_refresh') - return { body: { acknowledged: true } } - } else { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) - return { body: { errors: false, items: [{}] } } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - refreshOnCompletion: true, - onDocument (doc) { - return { - index: { _index: 'test' } - } - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - - t.test('Should perform a bulk request (custom action)', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test', _id: count } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - let id = 0 - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - onDocument (doc) { - return { - index: { - _index: 'test', - _id: id++ - } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - - t.test('Should perform a bulk request (retry)', async t => { - if (semver.lt(process.versions.node, '10.0.0')) { - t.skip('This test will not pass on Node v8') - return - } - async function handler (req, res) { - t.strictEqual(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/x-ndjson' }) - - let body = '' - req.setEncoding('utf8') - for await (const chunk of req) { - body += chunk - } - const [, payload] = body.split('\n') - - res.setHeader('content-type', 'application/json') - - if (JSON.parse(payload).user === 'arya') { - res.end(JSON.stringify({ - took: 0, - errors: true, - items: [{ - index: { - status: 429 - } - }] - })) - } else { - res.end(JSON.stringify({ - took: 0, - errors: false, - items: [{}] - })) - } - } - - const [{ port }, server] = await buildServer(handler) - const client = new Client({ node: `http://localhost:${port}` }) - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - wait: 10, - retries: 1, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.deepEqual(doc, { - status: 429, - error: null, - operation: { index: { _index: 'test' } }, - document: { user: 'arya', age: 18 }, - retried: true - }) - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') 
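For orientation while reading these assertions: every bulk test in this file exercises the same call shape. A runnable sketch, assuming a reachable cluster at ES_URL and illustrative (non-default) flushBytes/concurrency values:

import { Client } from '@elastic/elasticsearch'

async function run (): Promise<void> {
  const client = new Client({ node: process.env.ES_URL ?? 'http://localhost:9200/' })
  const result = await client.helpers.bulk({
    datasource: [{ user: 'jon', age: 23 }, { user: 'arya', age: 18 }],
    flushBytes: 1_000_000, // flush once the buffered ndjson payload reaches ~1 MB
    concurrency: 3,        // at most three in-flight _bulk requests
    onDocument (doc) {
      // one action line per document; the payload line is the document itself
      return { index: { _index: 'test' } }
    },
    onDrop (drop) {
      // called for documents rejected after any retries are exhausted
      console.error('dropped', drop.document, 'with status', drop.status)
    }
  })
  console.log(result) // { total, successful, failed, retry, time, bytes, aborted }
}

run().catch(console.error)
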
- t.match(result, { - total: 3, - successful: 2, - retry: 2, - failed: 1, - aborted: false - }) - server.stop() - }) - - t.test('Should perform a bulk request (retry a single document from batch)', async t => { - function handler (req, res) { - res.setHeader('content-type', 'application/json') - res.end(JSON.stringify({ - took: 0, - errors: true, - items: [ - { index: { status: 200 } }, - { index: { status: 429 } }, - { index: { status: 200 } } - ] - })) - } - - const [{ port }, server] = await buildServer(handler) - const client = new Client({ node: `http://localhost:${port}` }) - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - concurrency: 1, - wait: 10, - retries: 0, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.deepEqual(doc, { - status: 429, - error: null, - operation: { index: { _index: 'test' } }, - document: { user: 'arya', age: 18 }, - retried: false - }) - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 2, - retry: 0, - failed: 1, - aborted: false - }) - server.stop() - }) - - t.test('Should perform a bulk request (failure)', async t => { - if (semver.lt(process.versions.node, '10.0.0')) { - t.skip('This test will not pass on Node v8') - return - } - async function handler (req, res) { - t.strictEqual(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/x-ndjson' }) - - let body = '' - req.setEncoding('utf8') - for await (const chunk of req) { - body += chunk - } - const [, payload] = body.split('\n') - - res.setHeader('content-type', 'application/json') - - if (JSON.parse(payload).user === 'arya') { - res.end(JSON.stringify({ - took: 0, - errors: true, - items: [{ - index: { - status: 400, - error: { something: 'went wrong' } - } - }] - })) - } else { - res.end(JSON.stringify({ - took: 0, - errors: false, - items: [{}] - })) - } - } - - const [{ port }, server] = await buildServer(handler) - const client = new Client({ node: `http://localhost:${port}` }) - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - wait: 10, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.deepEqual(doc, { - status: 400, - error: { something: 'went wrong' }, - operation: { index: { _index: 'test' } }, - document: { user: 'arya', age: 18 }, - retried: false - }) - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 2, - retry: 0, - failed: 1, - aborted: false - }) - server.stop() - }) - - t.test('Server error', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - statusCode: 500, - body: { somothing: 'went wrong' } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const b = client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - try { - await b - t.fail('Should throw') - } catch (err) { - t.true(err instanceof errors.ResponseError) - } - }) - - t.test('Server error (high flush size, to trigger the finish error)', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - statusCode: 500, - body: { somothing: 'went wrong' } - } - } - }) - 
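As the two "Server error" cases assert, a 5xx reply makes the helper's promise reject with errors.ResponseError instead of routing documents to onDrop. A hedged sketch of consuming that rejection:

import { Client, errors } from '@elastic/elasticsearch'

async function ingest (client: Client, docs: Array<Record<string, unknown>>): Promise<void> {
  try {
    await client.helpers.bulk({
      datasource: docs,
      onDocument: () => ({ index: { _index: 'test' } })
    })
  } catch (err) {
    if (err instanceof errors.ResponseError) {
      // the failing HTTP response is attached to the error
      console.error('bulk flush failed with status', err.statusCode)
      return
    }
    throw err
  }
}
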
- const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const b = client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 5000000, - concurrency: 1, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - try { - await b - t.fail('Should throw') - } catch (err) { - t.true(err instanceof errors.ResponseError) - } - }) - - t.test('Should abort a bulk request', async t => { - if (semver.lt(process.versions.node, '10.0.0')) { - t.skip('This test will not pass on Node v8') - return - } - async function handler (req, res) { - t.strictEqual(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/x-ndjson' }) - - let body = '' - req.setEncoding('utf8') - for await (const chunk of req) { - body += chunk - } - const [, payload] = body.split('\n') - - res.setHeader('content-type', 'application/json') - - if (JSON.parse(payload).user === 'arya') { - res.end(JSON.stringify({ - took: 0, - errors: true, - items: [{ - index: { - status: 400, - error: { something: 'went wrong' } - } - }] - })) - } else { - res.end(JSON.stringify({ - took: 0, - errors: false, - items: [{}] - })) - } - } - - const [{ port }, server] = await buildServer(handler) - const client = new Client({ node: `http://localhost:${port}` }) - const b = client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - wait: 10, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - b.abort() - } - }) - - const result = await b - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 2, - successful: 1, - retry: 0, - failed: 1, - aborted: true - }) - server.stop() - }) - - t.test('Invalid operation', t => { - t.plan(2) - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - client.helpers - .bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - onDocument (doc) { - return { - foo: { _index: 'test' } - } - } - }) - .catch(err => { - t.true(err instanceof errors.ConfigurationError) - t.is(err.message, `Bulk helper invalid action: 'foo'`) - }) - }) - - t.end() - }) - - t.test('datasource as stream', t => { - t.test('Should perform a bulk request', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test', _id: count } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'small-dataset.ndjson'), 'utf8') - - let id = 0 - const result = await client.helpers.bulk({ - datasource: stream.pipe(split()), - flushBytes: 1, - concurrency: 1, - onDocument (doc) { - return { - index: { - _index: 'test', - _id: id++ - } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - 
total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - - t.end() - }) - - t.test('datasource as async generator', t => { - t.test('Should perform a bulk request', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - async function * generator () { - const data = dataset.slice() - for (const doc of data) { - yield doc - } - } - - const result = await client.helpers.bulk({ - datasource: generator(), - flushBytes: 1, - concurrency: 1, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - t.end() - }) - - t.end() -}) - -test('bulk create', t => { - t.test('Should perform a bulk request', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { create: { _index: 'test', _id: count } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - let id = 0 - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - onDocument (doc) { - return { - create: { - _index: 'test', - _id: id++ - } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - t.end() -}) - -test('bulk update', t => { - t.test('Should perform a bulk request', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { update: { _index: 'test', _id: count } }) - t.deepEqual(JSON.parse(payload), { doc: dataset[count++], doc_as_upsert: true }) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - let id = 0 - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - onDocument (doc) { - return [{ - update: { - _index: 'test', - _id: id++ - } - }, { - doc_as_upsert: true - }] - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - 
t.end() -}) - -test('bulk delete', t => { - t.test('Should perform a bulk request', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - t.deepEqual(JSON.parse(params.body), { delete: { _index: 'test', _id: count++ } }) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - let id = 0 - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - onDocument (doc) { - return { - delete: { - _index: 'test', - _id: id++ - } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - - t.test('Should perform a bulk request (failure)', async t => { - if (semver.lt(process.versions.node, '10.0.0')) { - t.skip('This test will not pass on Node v8') - return - } - async function handler (req, res) { - t.strictEqual(req.url, '/_bulk') - t.match(req.headers, { 'content-type': 'application/x-ndjson' }) - - let body = '' - req.setEncoding('utf8') - for await (const chunk of req) { - body += chunk - } - - res.setHeader('content-type', 'application/json') - - if (JSON.parse(body).delete._id === 1) { - res.end(JSON.stringify({ - took: 0, - errors: true, - items: [{ - delete: { - status: 400, - error: { something: 'went wrong' } - } - }] - })) - } else { - res.end(JSON.stringify({ - took: 0, - errors: false, - items: [{}] - })) - } - } - - const [{ port }, server] = await buildServer(handler) - const client = new Client({ node: `http://localhost:${port}` }) - let id = 0 - const result = await client.helpers.bulk({ - datasource: dataset.slice(), - flushBytes: 1, - concurrency: 1, - wait: 10, - onDocument (doc) { - return { - delete: { - _index: 'test', - _id: id++ - } - } - }, - onDrop (doc) { - t.deepEqual(doc, { - status: 400, - error: { something: 'went wrong' }, - operation: { delete: { _index: 'test', _id: 1 } }, - document: null, - retried: false - }) - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 2, - retry: 0, - failed: 1, - aborted: false - }) - server.stop() - }) - - t.end() -}) - -test('errors', t => { - t.test('datasource type', async t => { - const client = new Client({ - node: 'http://localhost:9200/' - }) - try { - await client.helpers.bulk({ - datasource: 'hello', - onDocument (doc) { - return { - index: { _index: 'test' } - } - } - }) - } catch (err) { - t.true(err instanceof errors.ConfigurationError) - t.is(err.message, 'bulk helper: the datasource must be an array or a buffer or a readable stream or an async generator') - } - }) - - t.test('missing datasource', async t => { - const client = new Client({ - node: 'http://localhost:9200/' - }) - try { - await client.helpers.bulk({ - onDocument (doc) { - return { - index: { _index: 'test' } - } - } - }) - } catch (err) { - t.true(err instanceof errors.ConfigurationError) - t.is(err.message, 'bulk helper: the datasource is required') - } - }) - - t.test('missing onDocument', async t => { - const client = new Client({ - node: 'http://localhost:9200/' - }) - try { - await client.helpers.bulk({ - datasource: dataset.slice() - }) - } 
catch (err) { - t.true(err instanceof errors.ConfigurationError) - t.is(err.message, 'bulk helper: the onDocument callback is required') - } - }) - - t.end() -}) - -test('Flush interval', t => { - t.test('Slow producer', async t => { - const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) - t.teardown(() => clock.uninstall()) - - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const result = await client.helpers.bulk({ - datasource: (async function * generator () { - for (const chunk of dataset) { - await clock.nextAsync() - yield chunk - } - })(), - flushBytes: 5000000, - concurrency: 1, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 3, - successful: 3, - retry: 0, - failed: 0, - aborted: false - }) - }) - - t.test('Abort operation', async t => { - const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) - t.teardown(() => clock.uninstall()) - - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.true(count < 2) - t.strictEqual(params.path, '/_bulk') - t.match(params.headers, { 'content-type': 'application/x-ndjson' }) - const [action, payload] = params.body.split('\n') - t.deepEqual(JSON.parse(action), { index: { _index: 'test' } }) - t.deepEqual(JSON.parse(payload), dataset[count++]) - return { body: { errors: false, items: [{}] } } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const b = client.helpers.bulk({ - datasource: (async function * generator () { - for (const chunk of dataset) { - await clock.nextAsync() - if (chunk.user === 'tyrion') { - // Needed otherwise in Node.js 10 - // the second request will never be sent - await Promise.resolve() - b.abort() - } - yield chunk - } - })(), - flushBytes: 5000000, - concurrency: 1, - onDocument (doc) { - return { - index: { _index: 'test' } - } - }, - onDrop (doc) { - t.fail('This should never be called') - } - }) - - const result = await b - - t.type(result.time, 'number') - t.type(result.bytes, 'number') - t.match(result, { - total: 2, - successful: 2, - retry: 0, - failed: 0, - aborted: true - }) - }) - - t.end() -}) diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts new file mode 100644 index 000000000..45487aaa4 --- /dev/null +++ b/test/unit/helpers/bulk.test.ts @@ -0,0 +1,1913 @@ +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import FakeTimers from '@sinonjs/fake-timers' +import { AssertionError } from 'node:assert' +import { createReadStream } from 'node:fs' +import * as http from 'node:http' +import { join } from 'node:path' +import split from 'split2' +import { Readable } from 'node:stream' +import { test } from 'tap' +import { Client, errors } from '../../../' +import { buildServer, connection } from '../../utils' +const { sleep } = require('../../integration/helper') + +let clientVersion: string = require('../../../package.json').version // eslint-disable-line +if (clientVersion.includes('-')) { + clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' +} +let transportVersion: string = require('@elastic/transport/package.json').version // eslint-disable-line +if (transportVersion.includes('-')) { + transportVersion = transportVersion.slice(0, transportVersion.indexOf('-')) + 'p' +} +const nodeVersion = process.versions.node + +const dataset = [ + { user: 'jon', age: 23 }, + { user: 'arya', age: 18 }, + { user: 'tyrion', age: 39 } +] + +interface Document { + user: string + age: number +} + +test('bulk index', t => { + t.test('datasource as array', t => { + t.test('Should perform a bulk request', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` + }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + t.type(doc.user, 'string') // testing that doc is type of Document + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Should perform a bulk request (with concurrency)', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + t.notMatch(params.headers, { + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` + }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection, + enableMetaHeader: false + }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 3, + onDocument (doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.fail('This should never be 
called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Should perform a bulk request (high flush size)', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + t.equal(params.body.split('\n').filter(Boolean).length, 6) + return { body: { errors: false, items: new Array(3).fill({}) } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 5000000, + concurrency: 1, + onDocument (doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('refreshOnCompletion', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + if (params.method === 'GET') { + t.equal(params.path, '/_all/_refresh') + return { body: { acknowledged: true } } + } else { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + refreshOnCompletion: true, + onDocument (doc) { + return { + index: { _index: 'test' } + } + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('refreshOnCompletion custom index', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + if (params.method === 'GET') { + t.equal(params.path, '/test/_refresh') + return { body: { acknowledged: true } } + } else { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + refreshOnCompletion: 'test', + onDocument (doc) { + return { + index: { _index: 'test' } + } + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Should perform a bulk request (custom action)', async t => { + 
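The two refreshOnCompletion variants above differ only in the refresh target: `true` refreshes /_all once the job completes, while a string value refreshes that index alone. A short sketch under the same assumptions as the tests:

import { Client } from '@elastic/elasticsearch'

async function bulkThenRefresh (client: Client, docs: object[]): Promise<void> {
  await client.helpers.bulk({
    datasource: docs,
    // issues a final refresh of /test after every flush has completed;
    // `true` would target /_all instead
    refreshOnCompletion: 'test',
    onDocument: () => ({ index: { _index: 'test' } })
  })
}
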
let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test', _id: count } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + let id = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return { + index: { + _index: 'test', + _id: String(id++) + } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Should perform a bulk request (retry)', async t => { + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + t.equal(req.url, '/_bulk') + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + + let body = '' + req.setEncoding('utf8') + for await (const chunk of req) { + body += chunk + } + const [, payload] = body.split('\n') + + res.setHeader('content-type', 'application/json') + + if (JSON.parse(payload).user === 'arya') { + res.end(JSON.stringify({ + took: 0, + errors: true, + items: [{ + index: { + status: 429 + } + }] + })) + } else { + res.end(JSON.stringify({ + took: 0, + errors: false, + items: [{}] + })) + } + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + wait: 10, + retries: 1, + onDocument (doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.same(doc, { + status: 429, + error: null, + operation: { index: { _index: 'test' } }, + document: { user: 'arya', age: 18 }, + retried: true + }) + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 2, + retry: 2, + failed: 1, + aborted: false + }) + server.stop() + }) + + t.test('Should perform a bulk request (retry a single document from batch)', async t => { + function handler (req: http.IncomingMessage, res: http.ServerResponse) { + res.setHeader('content-type', 'application/json') + res.end(JSON.stringify({ + took: 0, + errors: true, + items: [ + { index: { status: 200 } }, + { index: { status: 429 } }, + { index: { status: 200 } } + ] + })) + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + concurrency: 1, + wait: 10, + retries: 0, + onDocument (doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.same(doc, { + status: 429, + error: null, + operation: { index: { _index: 'test' } }, + document: { user: 'arya', age: 18 }, + retried: false + }) + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 2, + retry: 0, + failed: 1, + aborted: false + }) + server.stop() + }) + + t.test('Should perform a bulk 
request (failure)', async t => { + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + t.equal(req.url, '/_bulk') + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + + let body = '' + req.setEncoding('utf8') + for await (const chunk of req) { + body += chunk + } + const [, payload] = body.split('\n') + + res.setHeader('content-type', 'application/json') + + if (JSON.parse(payload).user === 'arya') { + res.end(JSON.stringify({ + took: 0, + errors: true, + items: [{ + index: { + status: 400, + error: { something: 'went wrong' } + } + }] + })) + } else { + res.end(JSON.stringify({ + took: 0, + errors: false, + items: [{}] + })) + } + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + wait: 10, + onDocument (doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.same(doc, { + status: 400, + error: { something: 'went wrong' }, + operation: { index: { _index: 'test' } }, + document: { user: 'arya', age: 18 }, + retried: false + }) + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 2, + retry: 0, + failed: 1, + aborted: false + }) + server.stop() + }) + + t.test('Server error', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + statusCode: 500, + body: { somothing: 'went wrong' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const b = client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + + try { + await b + t.fail('Should throw') + } catch (err: any) { + t.ok(err instanceof errors.ResponseError) + } + }) + + t.test('Server error (high flush size, to trigger the finish error)', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + statusCode: 500, + body: { somothing: 'went wrong' } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const b = client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 5000000, + concurrency: 1, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + + try { + await b + t.fail('Should throw') + } catch (err: any) { + t.ok(err instanceof errors.ResponseError) + } + }) + + t.test('Should abort a bulk request', async t => { + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + t.equal(req.url, '/_bulk') + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + + let body = '' + req.setEncoding('utf8') + for await (const chunk of req) { + body += chunk + } + const [, payload] = body.split('\n') + + res.setHeader('content-type', 'application/json') + + if (JSON.parse(payload).user === 'arya') { + res.end(JSON.stringify({ + took: 0, + errors: true, + items: [{ + index: { + status: 400, + error: { something: 'went wrong' } + } + }] + })) + } else { + res.end(JSON.stringify({ + took: 0, + errors: false, + items: [{}] + })) + } 
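The abort case being set up here stops the job from inside onDrop: b.abort() prevents further chunks from being pulled from the datasource, and the helper still resolves (with aborted: true) rather than rejecting. A sketch of that pattern:

import { Client } from '@elastic/elasticsearch'

function abortableBulk (client: Client, docs: object[]) {
  const b = client.helpers.bulk({
    datasource: docs,
    onDocument: () => ({ index: { _index: 'test' } }),
    onDrop () {
      // stop at the first dropped document; operations already buffered
      // may still be flushed before the promise settles
      b.abort()
    }
  })
  return b // resolves with { ..., aborted: true } when stopped early
}
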
+ } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + const b = client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + wait: 10, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (_doc) { + b.abort() + } + }) + + const result = await b + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 2, + successful: 1, + retry: 0, + failed: 1, + aborted: true + }) + server.stop() + }) + + t.test('Invalid operation', t => { + t.plan(2) + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + client.helpers + .bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + // @ts-expect-error + onDocument (_doc) { + return { + foo: { _index: 'test' } + } + } + }) + .catch(err => { + t.ok(err instanceof errors.ConfigurationError) + t.equal(err.message, 'Bulk helper invalid action: \'foo\'') + }) + }) + + t.test('should call onSuccess callback for each indexed document', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + // @ts-expect-error + let [action] = params.body.split('\n') + action = JSON.parse(action) + return { body: { errors: false, items: [action] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + let count = 0 + await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onSuccess ({ result, document }) { + t.same(result, { index: { _index: 'test' }}) + t.same(document, dataset[count++]) + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + t.equal(count, 3) + t.end() + }) + + t.end() + }) + + t.test('datasource as stream', t => { + t.test('Should perform a bulk request', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test', _id: count } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'small-dataset.ndjson'), 'utf8') + + let id = 0 + const result = await client.helpers.bulk({ + datasource: stream.pipe(split()), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return { + index: { + _index: 'test', + _id: String(id++) + } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('onSuccess is called for each indexed document', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + // @ts-expect-error + let [action] = params.body.split('\n') + action = JSON.parse(action) + 
return { body: { errors: false, items: [action] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const stream = createReadStream(join(__dirname, '..', '..', 'fixtures', 'small-dataset.ndjson'), 'utf8') + + let count = 0 + await client.helpers.bulk({ + datasource: stream.pipe(split()), + flushBytes: 1, + concurrency: 1, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onSuccess ({ result, document }) { + t.same(result, { index: { _index: 'test' }}) + t.same(document, dataset[count++]) + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + t.equal(count, 3) + t.end() + }) + + t.end() + }) + + t.test('datasource as async generator', t => { + t.test('Should perform a bulk request', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + yield doc + } + } + + const result = await client.helpers.bulk({ + datasource: generator(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('onSuccess is called for each indexed document', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + // @ts-expect-error + let [action] = params.body.split('\n') + action = JSON.parse(action) + return { body: { errors: false, items: [action] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + yield doc + } + } + + let count = 0 + await client.helpers.bulk({ + datasource: generator(), + flushBytes: 1, + concurrency: 1, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onSuccess ({ result, document }) { + t.same(result, { index: { _index: 'test' }}) + t.same(document, dataset[count++]) + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + t.equal(count, 3) + t.end() + }) + t.end() + }) + + t.test('Should use payload returned by `onDocument`', async t => { + let count = 0 + const updatedAt = '1970-01-01T12:00:00.000Z' + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` + }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), { ...dataset[count++], 
updatedAt }) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + t.type(doc.user, 'string') // testing that doc is type of Document + return [ + { + index: { + _index: 'test' + } + }, + { ...doc, updatedAt } + ] + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.end() +}) + +test('bulk create', t => { + t.test('Should perform a bulk request', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { create: { _index: 'test', _id: count } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + let id = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return { + create: { + _index: 'test', + _id: String(id++) + } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Should use payload returned by `onDocument`', async t => { + let count = 0 + const updatedAt = '1970-01-01T12:00:00.000Z' + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { create: { _index: 'test', _id: count } }) + t.same(JSON.parse(payload), { ...dataset[count++], updatedAt }) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + let id = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return [ + { + create: { + _index: 'test', + _id: String(id++) + } + }, + { ...doc, updatedAt } + ] + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + + + t.end() +}) + +test('bulk update', t => { + t.test('Should perform a bulk request', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { update: { _index: 'test', _id: 
count } }) + t.same(JSON.parse(payload), { doc: dataset[count++], doc_as_upsert: true }) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + let id = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return [{ + update: { + _index: 'test', + _id: String(id++) + } + }, { + doc_as_upsert: true + }] + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Should perform a bulk request dataset as string)', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { update: { _index: 'test', _id: count } }) + t.same(JSON.parse(payload), { doc: dataset[count++] }) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + let id = 0 + const result = await client.helpers.bulk({ + datasource: dataset.map(d => JSON.stringify(d)), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return [{ + update: { + _index: 'test', + _id: String(id++) + } + }, {}] + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Should track the number of noop results', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { update: { _index: 'test', _id: count } }) + t.same(JSON.parse(payload), { doc: dataset[count++], doc_as_upsert: true }) + return { body: { errors: false, items: [{ update: { result: 'noop' } }] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + let id = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return [{ + update: { + _index: 'test', + _id: String(id++) + } + }, { + doc_as_upsert: true + }] + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + noop: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.end() +}) + +test('bulk delete', t => { + t.test('Should perform a bulk request', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + t.same(JSON.parse(params.body), { delete: { _index: 'test', _id: 
count++ } }) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + let id = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return { + delete: { + _index: 'test', + _id: String(id++) + } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Should perform a bulk request (failure)', async t => { + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + t.equal(req.url, '/_bulk') + t.match(req.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + + let body = '' + req.setEncoding('utf8') + for await (const chunk of req) { + body += chunk + } + + res.setHeader('content-type', 'application/json') + + if (JSON.parse(body).delete._id === '1') { + res.end(JSON.stringify({ + took: 0, + errors: true, + items: [{ + delete: { + status: 400, + error: { something: 'went wrong' } + } + }] + })) + } else { + res.end(JSON.stringify({ + took: 0, + errors: false, + items: [{}] + })) + } + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + let id = 0 + + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + wait: 10, + onDocument (doc) { + return { + delete: { + _index: 'test', + _id: String(id++) + } + } + }, + onDrop (doc) { + t.same(doc, { + status: 400, + error: { something: 'went wrong' }, + operation: { delete: { _index: 'test', _id: 1 } }, + document: null, + retried: false + }) + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 2, + retry: 0, + failed: 1, + aborted: false + }) + server.stop() + }) + + t.test('Should call onDrop on the correct document when doing a mix of operations that includes deletes', async t => { + // checks to ensure onDrop doesn't provide the wrong document when some operations are deletes + // see https://github.com/elastic/elasticsearch-js/issues/1751 + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + res.setHeader('content-type', 'application/json') + res.end(JSON.stringify({ + took: 0, + errors: true, + items: [ + { delete: { status: 200 } }, + { index: { status: 429 } }, + { index: { status: 200 } } + ] + })) + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + let counter = 0 + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + concurrency: 1, + wait: 10, + retries: 0, + onDocument (doc) { + counter++ + if (counter === 1) { + return { + delete: { + _index: 'test', + _id: String(counter) + } + } + } else { + return { + index: { + _index: 'test' + } + } + } + }, + onDrop (doc) { + t.same(doc, { + status: 429, + error: null, + operation: { index: { _index: 'test' } }, + document: { user: 'arya', age: 18 }, + retried: false + }) + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 2, + retry: 0, + failed: 1, + aborted: false + }) + server.stop() + }) + + t.test('should call onSuccess callback with delete action 
object', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + // @ts-expect-error + let [action, payload] = params.body.split('\n') + action = JSON.parse(action) + return { body: { errors: false, items: [action] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + let docCount = 0 + let successCount = 0 + await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (_doc) { + if (docCount++ === 1) { + return { + delete: { + _index: 'test', + _id: String(docCount) + } + } + } else { + return { + index: { _index: 'test' } + } + } + }, + onSuccess ({ result, document }) { + const item = dataset[successCount] + if (successCount++ === 1) { + t.same(result, { + delete: { + _index: 'test', + _id: String(successCount) + } + }) + } else { + t.same(result, { index: { _index: 'test' }}) + t.same(document, item) + } + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + + t.end() + }) + + t.end() +}) + +test('transport options', t => { + t.test('Should pass transport options in request', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + count++ + + if (params.path === '/_bulk') { + t.match(params.headers, { + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', + foo: 'bar' + }) + return { body: { errors: false, items: [{}] } } + } + + t.equal(params.path, '/_all/_refresh') + t.match(params.headers, { + foo: 'bar' + }) + return { body: {} } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const result = await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return { index: { _index: 'test' } } + }, + onDrop (doc) { + t.fail('This should never be called') + }, + refreshOnCompletion: true + }, { + headers: { + foo: 'bar' + } + }) + + t.equal(count, 4) // three bulk requests, one refresh + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Should not allow asStream request option', async t => { + t.plan(2) + + const client = new Client({ + node: 'http://localhost:9200/', + }) + + try { + await client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (doc) { + return { index: { _index: 'test' } } + }, + onDrop (doc) { + t.fail('This should never be called') + }, + refreshOnCompletion: true + }, { + headers: { + foo: 'bar' + }, + asStream: true, + }) + } catch (err: any) { + t.ok(err instanceof AssertionError) + t.equal(err.message, 'bulk helper: the asStream request option is not supported') + } + }) + + t.end() +}) + +test('errors', t => { + t.test('datasource type', async t => { + const client = new Client({ + node: 'http://localhost:9200/' + }) + try { + await client.helpers.bulk({ + // @ts-expect-error + datasource: 'hello', + onDocument (doc) { + return { + index: { _index: 'test' } + } + } + }) + } catch (err: any) { + t.ok(err instanceof errors.ConfigurationError) + t.equal(err.message, 'bulk helper: the datasource must be an array or a buffer or a readable stream or an async generator') + } + }) + + t.test('missing datasource', async t => { + const client = new Client({ + node: 'http://localhost:9200/' + }) + 
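+ // the helper validates its options up front: a missing datasource rejects with a ConfigurationError before any request is made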
try { + // @ts-expect-error + await client.helpers.bulk({ + onDocument (doc) { + return { + index: { _index: 'test' } + } + } + }) + } catch (err: any) { + t.ok(err instanceof errors.ConfigurationError) + t.equal(err.message, 'bulk helper: the datasource is required') + } + }) + + t.test('missing onDocument', async t => { + const client = new Client({ + node: 'http://localhost:9200/' + }) + try { + // @ts-expect-error + await client.helpers.bulk({ + datasource: dataset.slice() + }) + } catch (err: any) { + t.ok(err instanceof errors.ConfigurationError) + t.equal(err.message, 'bulk helper: the onDocument callback is required') + } + }) + + t.end() +}) + +test('Flush interval', t => { + t.test('Slow producer', async t => { + const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'], shouldClearNativeTimers: true }) + t.teardown(() => clock.uninstall()) + + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const result = await client.helpers.bulk({ + datasource: (async function * generator () { + for (const chunk of dataset) { + await clock.nextAsync() + yield chunk + } + })(), + flushBytes: 5000000, + concurrency: 1, + onDocument (doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + t.test('Abort operation', async t => { + const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'], shouldClearNativeTimers: true }) + t.teardown(() => clock.uninstall()) + + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.ok(count < 2) + t.equal(params.path, '/_bulk') + t.match(params.headers, { 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9' }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const b = client.helpers.bulk({ + datasource: (async function * generator () { + for (const chunk of dataset) { + await clock.nextAsync() + if (chunk.user === 'tyrion') { + // Needed otherwise in Node.js 10 + // the second request will never be sent + await Promise.resolve() + // @ts-ignore + b.abort() + } + yield chunk + } + })(), + flushBytes: 5000000, + concurrency: 1, + onDocument (doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (doc) { + t.fail('This should never be called') + } + }) + + const result = await b + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 2, + successful: 2, + retry: 0, + failed: 0, + aborted: true + }) + }) + + t.test('Operation stats', async t => { + let 
count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.equal(params.path, '/_bulk') + t.match(params.headers, { + 'content-type': 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=bp` + }) + // @ts-expect-error + const [action, payload] = params.body.split('\n') + t.same(JSON.parse(action), { index: { _index: 'test' } }) + t.same(JSON.parse(payload), dataset[count++]) + return { body: { errors: false, items: [{}] } } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + const b = client.helpers.bulk({ + datasource: dataset.slice(), + flushBytes: 1, + concurrency: 1, + onDocument (_doc) { + return { + index: { _index: 'test' } + } + }, + onDrop (_doc) { + t.fail('This should never be called') + } + }) + const result = await b + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, b.stats) + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + }) + + test(`flush timeout does not lock process when flushInterval is less than server timeout`, async t => { + const flushInterval = 500 + + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + res.writeHead(200, { 'content-type': 'application/json' }) + res.end(JSON.stringify({ errors: false, items: [{}] })) + }, 1000) + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + await sleep(flushInterval) + yield doc + } + } + + const result = await client.helpers.bulk({ + datasource: Readable.from(generator()), + flushBytes: 1, + flushInterval: flushInterval, + concurrency: 1, + onDocument (_) { + return { + index: { _index: 'test' } + } + }, + onDrop (_) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + + server.stop() + t.end() + }) + + test(`flush timeout does not lock process when flushInterval is greater than server timeout`, async t => { + const flushInterval = 500 + + async function handler (req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + res.writeHead(200, { 'content-type': 'application/json' }) + res.end(JSON.stringify({ errors: false, items: [{}] })) + }, 250) + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + await sleep(flushInterval) + yield doc + } + } + + const result = await client.helpers.bulk({ + datasource: Readable.from(generator()), + flushBytes: 1, + flushInterval: flushInterval, + concurrency: 1, + onDocument (_) { + return { + index: { _index: 'test' } + } + }, + onDrop (_) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + + server.stop() + t.end() + }) + + test(`flush timeout does not lock process when flushInterval is equal to server timeout`, async t => { + const flushInterval = 500 + + async 
function handler (req: http.IncomingMessage, res: http.ServerResponse) { + setTimeout(() => { + res.writeHead(200, { 'content-type': 'application/json' }) + res.end(JSON.stringify({ errors: false, items: [{}] })) + }, flushInterval) + } + + const [{ port }, server] = await buildServer(handler) + const client = new Client({ node: `http://localhost:${port}` }) + + async function * generator () { + const data = dataset.slice() + for (const doc of data) { + await sleep(flushInterval) + yield doc + } + } + + const result = await client.helpers.bulk({ + datasource: Readable.from(generator()), + flushBytes: 1, + flushInterval: flushInterval, + concurrency: 1, + onDocument (_) { + return { + index: { _index: 'test' } + } + }, + onDrop (_) { + t.fail('This should never be called') + } + }) + + t.type(result.time, 'number') + t.type(result.bytes, 'number') + t.match(result, { + total: 3, + successful: 3, + retry: 0, + failed: 0, + aborted: false + }) + + server.stop() + t.end() + }) + + t.end() +}) + diff --git a/test/unit/helpers/esql.test.ts b/test/unit/helpers/esql.test.ts new file mode 100644 index 000000000..dace000c6 --- /dev/null +++ b/test/unit/helpers/esql.test.ts @@ -0,0 +1,318 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import { test } from 'tap' +import * as arrow from 'apache-arrow' +import { connection } from '../../utils' +import { Client } from '../../../' + +test('ES|QL helper', t => { + test('toRecords', t => { + t.test('Takes an ESQL response and pivots it to an array of records', async t => { + type MyDoc = { + '@timestamp': string, + client_ip: string, + event_duration: number, + message: string, + } + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + columns: [ + { name: '@timestamp', type: 'date' }, + { name: 'client_ip', type: 'ip' }, + { name: 'event_duration', type: 'long' }, + { name: 'message', type: 'keyword' } + ], + values: [ + [ + '2023-10-23T12:15:03.360Z', + '172.21.2.162', + 3450233, + 'Connected to 10.1.0.3' + ], + [ + '2023-10-23T12:27:28.948Z', + '172.21.2.113', + 2764889, + 'Connected to 10.1.0.2' + ] + ] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const result = await client.helpers.esql({ query: 'FROM sample_data' }).toRecords() + const { records, columns } = result + t.equal(records.length, 2) + t.ok(records[0]) + t.same(records[0], { + '@timestamp': '2023-10-23T12:15:03.360Z', + client_ip: '172.21.2.162', + event_duration: 3450233, + message: 'Connected to 10.1.0.3' + }) + t.same(columns, [ + { name: '@timestamp', type: 'date' }, + { name: 'client_ip', type: 'ip' }, + { name: 'event_duration', type: 'long' }, + { name: 'message', type: 'keyword' } + ]) + t.end() + }) + + t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + const header = params.headers?.['x-elastic-client-meta'] ?? 
'' + t.ok(header.includes('h=qo'), `Client meta header does not include ESQL helper value: ${header}`) + return { + body: { + columns: [{ name: '@timestamp', type: 'date' }], + values: [['2023-10-23T12:15:03.360Z']], + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + await client.helpers.esql({ query: 'FROM sample_data' }).toRecords() + t.end() + }) + + t.end() + }) + + test('toArrowTable', t => { + t.test('Parses a binary response into an Arrow table', async t => { + const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA=' + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: Buffer.from(binaryContent, 'base64'), + statusCode: 200, + headers: { + 'content-type': 'application/vnd.elasticsearch+arrow+stream' + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowTable() + t.ok(result instanceof arrow.Table) + + const testRecords = [ + [ + ['amount', 4.900000095367432], + ['date', 1729532586965] + ], + [ + ['amount', 8.199999809265137], + ['date', 1729446186965], + ], + [ + ['amount', 15.5], + ['date', 1729359786965], + ], + [ + ['amount', 9.899999618530273], + ['date', 1729273386965], + ], + [ + ['amount', 13.899999618530273], + ['date', 1729186986965], + ] + ] + + let count = 0 + const table = [...result] + for (const record of table) { + t.same(record, testRecords[count]) + count++ + } + t.end() + }) + + t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => { + const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA=' + + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + const header = params.headers?.['x-elastic-client-meta'] ?? 
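+ // the assertion below relies on 'h=qa' tagging Arrow-table ES|QL helper requests in the client-meta header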
'' + t.ok(header.includes('h=qa'), `Client meta header does not include ESQL helper value: ${header}`) + return { + body: Buffer.from(binaryContent, 'base64'), + statusCode: 200, + headers: { + 'content-type': 'application/vnd.elasticsearch+arrow+stream' + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + await client.helpers.esql({ query: 'FROM sample_data' }).toArrowTable() + t.end() + }) + + t.end() + }) + + test('toArrowReader', async t => { + const testRecords = [ + { amount: 4.900000095367432, }, + { amount: 8.199999809265137, }, + { amount: 15.5, }, + { amount: 9.899999618530273, }, + { amount: 13.899999618530273, }, + ] + + // build reusable Arrow table + const table = arrow.tableFromJSON(testRecords) + const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array() + + t.test('Parses a binary response into an Arrow stream reader', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: Buffer.from(rawData), + statusCode: 200, + headers: { + 'content-type': 'application/vnd.elasticsearch+arrow+stream', + 'transfer-encoding': 'chunked' + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader() + t.ok(result.isStream()) + + let count = 0 + for await (const recordBatch of result) { + for (const record of recordBatch) { + t.same(record.toJSON(), testRecords[count]) + count++ + } + } + + t.end() + }) + + t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + const header = params.headers?.['x-elastic-client-meta'] ?? 
'' + t.ok(header.includes('h=qa'), `Client meta header does not include ESQL helper value: ${header}`) + return { + body: Buffer.from(rawData), + statusCode: 200, + headers: { + 'content-type': 'application/vnd.elasticsearch+arrow+stream', + 'transfer-encoding': 'chunked' + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader() + t.end() + }) + + t.test('multi-batch support', async t => { + const intType = new arrow.Uint32 + const floatType = new arrow.Float32 + const schema = new arrow.Schema([ + arrow.Field.new('id', intType), + arrow.Field.new('val', floatType) + ]) + + function getBatch(ids: number[], vals: number[]) { + const id = arrow.makeData({ type: intType, data: ids }) + const val = arrow.makeData({ type: floatType, data: vals }) + return new arrow.RecordBatch({ id, val }) + } + + const batch1 = getBatch([1, 2, 3], [0.1, 0.2, 0.3]) + const batch2 = getBatch([4, 5, 6], [0.4, 0.5, 0.6]) + const batch3 = getBatch([7, 8, 9], [0.7, 0.8, 0.9]) + + const table = new arrow.Table(schema, [ + new arrow.RecordBatch(schema, batch1.data), + new arrow.RecordBatch(schema, batch2.data), + new arrow.RecordBatch(schema, batch3.data), + ]) + + const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array() + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: Buffer.from(rawData), + statusCode: 200, + headers: { + 'content-type': 'application/vnd.elasticsearch+arrow+stream' + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader() + t.ok(result.isStream()) + + let counter = 0 + for await (const batch of result) { + for (const row of batch) { + counter++ + const { id, val } = row.toJSON() + t.equal(id, counter) + // floating points are hard in JS + t.equal((Math.round(val * 10) / 10).toFixed(1), (counter * 0.1).toFixed(1)) + } + } + t.end() + }) + + t.end() + }) + t.end() +}) diff --git a/test/unit/helpers/msearch.test.js b/test/unit/helpers/msearch.test.js deleted file mode 100644 index 64e79dcf1..000000000 --- a/test/unit/helpers/msearch.test.js +++ /dev/null @@ -1,736 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { Client, errors } = require('../../../') -const { connection } = require('../../utils') -const FakeTimers = require('@sinonjs/fake-timers') - -test('Basic', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - body: { - responses: [{ - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }] - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch({ operations: 1 }) - - const result = await s.search( - { index: 'test' }, - { query: { match: { foo: 'bar' } } } - ) - - t.deepEqual(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }) - - t.deepEqual(result.documents, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) - - t.teardown(() => s.stop()) -}) - -test('Multiple searches (inside async iterator)', t => { - t.plan(6) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - body: { - responses: [{ - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }, { - status: 200, - hits: { - hits: [ - { _source: { four: 'four' } }, - { _source: { five: 'five' } }, - { _source: { six: 'six' } } - ] - } - }] - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch({ operations: 2 }) - - s.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.deepEqual(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }) - - t.deepEqual(result.documents, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) - }) - - s.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.deepEqual(result.body, { - status: 200, - hits: { - hits: [ - { _source: { four: 'four' } }, - { _source: { five: 'five' } }, - { _source: { six: 'six' } } - ] - } - }) - - t.deepEqual(result.documents, [ - { four: 'four' }, - { five: 'five' }, - { six: 'six' } - ]) - }) - - t.teardown(() => s.stop()) -}) - -test('Multiple searches (async iterator exits)', t => { - t.plan(6) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - body: { - responses: [{ - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }, { - status: 200, - hits: { - hits: [ - { _source: { four: 'four' } }, - { _source: { five: 'five' } }, - { _source: { six: 'six' } } - ] - } - }] - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - - s.search({ index: 'test' }, { query: {} }, (err, result) => { - t.error(err) - t.deepEqual(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }) - - t.deepEqual(result.documents, [ - { one: 
'one' }, - { two: 'two' }, - { three: 'three' } - ]) - }) - - s.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.deepEqual(result.body, { - status: 200, - hits: { - hits: [ - { _source: { four: 'four' } }, - { _source: { five: 'five' } }, - { _source: { six: 'six' } } - ] - } - }) - - t.deepEqual(result.documents, [ - { four: 'four' }, - { five: 'five' }, - { six: 'six' } - ]) - }) - - setImmediate(() => s.stop()) -}) - -test('Stop a msearch processor (promises)', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return {} - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch({ operations: 1 }) - - s.stop() - - try { - await s.search( - { index: 'test' }, - { query: { match: { foo: 'bar' } } } - ) - } catch (err) { - t.strictEqual(err.message, 'The msearch processor has been stopped') - } - - t.teardown(() => s.stop()) -}) - -test('Stop a msearch processor (callbacks)', t => { - t.plan(1) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return {} - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - - s.stop() - - s.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.strictEqual(err.message, 'The msearch processor has been stopped') - }) -}) - -test('Bad header', t => { - t.plan(1) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return {} - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - - s.search(null, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.strictEqual(err.message, 'The header should be an object') - }) - - t.teardown(() => s.stop()) -}) - -test('Bad body', t => { - t.plan(1) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return {} - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - - s.search({ index: 'test' }, null, (err, result) => { - t.strictEqual(err.message, 'The body should be an object') - }) - - t.teardown(() => s.stop()) -}) - -test('Retry on 429', async t => { - let count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - if (count++ === 0) { - return { - body: { - responses: [{ - status: 429, - error: {} - }] - } - } - } else { - return { - body: { - responses: [{ - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch({ operations: 1, wait: 10 }) - - const result = await s.search( - { index: 'test' }, - { query: { match: { foo: 'bar' } } } - ) - - t.deepEqual(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }) - - t.deepEqual(result.documents, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) - - t.teardown(() => s.stop()) -}) - -test('Single search errors', async t => { - const MockConnection 
= connection.buildMockConnection({ - onRequest (params) { - return { - body: { - responses: [{ - status: 400, - error: { foo: 'bar' } - }] - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch({ operations: 1 }) - - try { - await s.search( - { index: 'test' }, - { query: { match: { foo: 'bar' } } } - ) - } catch (err) { - t.true(err instanceof errors.ResponseError) - } - - t.teardown(() => s.stop()) -}) - -test('Entire msearch fails', t => { - t.plan(4) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - statusCode: 500, - body: { - status: 500, - error: { foo: 'bar' } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch({ operations: 1 }) - - s.search({ index: 'test' }, { query: {} }, (err, result) => { - t.true(err instanceof errors.ResponseError) - t.deepEqual(result.documents, []) - }) - - s.search({ index: 'test' }, { query: {} }, (err, result) => { - t.true(err instanceof errors.ResponseError) - t.deepEqual(result.documents, []) - }) - - t.teardown(() => s.stop()) -}) - -test('Resolves the msearch helper', t => { - t.plan(1) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return {} - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - - s.stop() - - s.then( - () => t.pass('called'), - e => t.fail('Should not fail') - ) - - s.catch(e => t.fail('Should not fail')) -}) - -test('Stop the msearch helper with an error', t => { - t.plan(3) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return {} - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - - s.stop(new Error('kaboom')) - - s.then( - () => t.fail('Should fail'), - err => t.is(err.message, 'kaboom') - ) - - s.catch(err => t.is(err.message, 'kaboom')) - - s.search({ index: 'test' }, { query: {} }, (err, result) => { - t.is(err.message, 'kaboom') - }) -}) - -test('Multiple searches (concurrency = 1)', t => { - t.plan(6) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - body: { - responses: [{ - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }] - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch({ operations: 1, concurrency: 1 }) - - s.search({ index: 'test' }, { query: {} }, (err, result) => { - t.error(err) - t.deepEqual(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }) - - t.deepEqual(result.documents, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) - }) - - s.search({ index: 'test' }, { query: {} }, (err, result) => { - t.error(err) - t.deepEqual(result.body, { - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }) - - t.deepEqual(result.documents, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) - }) - - t.teardown(() => 
s.stop()) -}) - -test('Flush interval', t => { - t.plan(4) - const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) - t.teardown(() => clock.uninstall()) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - body: { - responses: [{ - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }, { - status: 200, - hits: { - hits: [ - { _source: { four: 'four' } }, - { _source: { five: 'five' } }, - { _source: { six: 'six' } } - ] - } - }] - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - - s.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.is(result.documents.length, 3) - }) - - s.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.is(result.documents.length, 3) - }) - - setImmediate(clock.next) - - t.teardown(() => s.stop()) -}) - -test('Flush interval - early stop', t => { - t.plan(3) - const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'] }) - t.teardown(() => clock.uninstall()) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - body: { - responses: [{ - status: 200, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - }] - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - - s.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.error(err) - t.is(result.documents.length, 3) - }) - - setImmediate(() => { - clock.next() - s.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }, (err, result) => { - t.ok(err instanceof errors.ConfigurationError) - }) - }) - - s.stop() -}) - -test('Stop should resolve the helper', t => { - t.plan(1) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - body: { - responses: [] - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - setImmediate(s.stop) - - s.then(() => t.pass('Called')) - .catch(() => t.fail('Should not fail')) -}) - -test('Stop should resolve the helper (error)', t => { - t.plan(3) - - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - return { - body: { - responses: [] - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const s = client.helpers.msearch() - setImmediate(s.stop, new Error('kaboom')) - - s.then(() => t.fail('Should not fail')) - .catch(err => t.is(err.message, 'kaboom')) - - s.catch(err => t.is(err.message, 'kaboom')) - - s.then(() => t.fail('Should not fail'), err => t.is(err.message, 'kaboom')) -}) diff --git a/test/unit/helpers/msearch.test.ts b/test/unit/helpers/msearch.test.ts new file mode 100644 index 000000000..a87d86c04 --- /dev/null +++ b/test/unit/helpers/msearch.test.ts @@ -0,0 +1,760 @@ +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import { test } from 'tap' +import { Client, errors } from '../../../' +import { connection } from '../../utils' +import FakeTimers from '@sinonjs/fake-timers' + +test('Basic', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + responses: [{ + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch({ operations: 1 }) + + const result = await m.search( + { index: 'test' }, + { query: { match: { foo: 'bar' } } } + ) + + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) + + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) + + t.teardown(() => m.stop()) +}) + +test('Multiple searches (inside async iterator)', t => { + t.plan(4) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + responses: [{ + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }, { + status: 200, + hits: { + hits: [ + { _source: { four: 'four' } }, + { _source: { five: 'five' } }, + { _source: { six: 'six' } } + ] + } + }] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch({ operations: 2 }) + + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) + + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) + }) + .catch(t.error) + + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { four: 'four' } }, + { _source: { five: 'five' } }, + { _source: { six: 'six' } } + ] + } + }) + + t.same(result.documents, [ + { four: 'four' }, + { five: 'five' }, + { six: 'six' } + ]) + }) + .catch(t.error) + + t.teardown(() => m.stop()) +}) + +test('Multiple searches (async iterator exits)', t => { + t.plan(4) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + responses: [{ + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }, { + status: 200, + hits: { + hits: [ + { _source: { four: 'four' } }, + { _source: { five: 'five' } }, + { _source: { six: 'six' } } + ] + } + }] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch() + + m.search({ index: 'test' }, { query: {} }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) + + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) + }) + .catch(t.error) + + 
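+ // the second search is fulfilled by the second entry of the same mocked _msearch response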
m.search({ index: 'test' }, { query: {} }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { four: 'four' } }, + { _source: { five: 'five' } }, + { _source: { six: 'six' } } + ] + } + }) + + t.same(result.documents, [ + { four: 'four' }, + { five: 'five' }, + { six: 'six' } + ]) + }) + .catch(t.error) + + setImmediate(() => m.stop()) +}) + +test('Stop a msearch processor (promises)', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { body: {} } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch({ operations: 1 }) + + m.stop() + + try { + await m.search( + { index: 'test' }, + { query: { match: { foo: 'bar' } } } + ) + } catch (err: any) { + t.equal(err.message, 'The msearch processor has been stopped') + } + + t.teardown(() => m.stop()) +}) + +test('Bad header', t => { + t.plan(1) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { body: {} } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch() + + // @ts-expect-error + m.search(null, { query: { match: { foo: 'bar' } } }) + .catch(err => { + t.equal(err.message, 'The header should be an object') + }) + + t.teardown(() => m.stop()) +}) + +test('Bad body', t => { + t.plan(1) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { body: {} } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch() + + // @ts-expect-error + m.search({ index: 'test' }, null) + .catch(err => { + t.equal(err.message, 'The body should be an object') + }) + + t.teardown(() => m.stop()) +}) + +test('Retry on 429', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + if (count++ === 0) { + return { + body: { + responses: [{ + status: 429, + error: {} + }] + } + } + } else { + return { + body: { + responses: [{ + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }] + } + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch({ operations: 1, wait: 10 }) + + const result = await m.search( + { index: 'test' }, + { query: { match: { foo: 'bar' } } } + ) + + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) + + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) + + t.teardown(() => m.stop()) +}) + +test('Single search errors', async t => { + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + responses: [{ + status: 400, + error: { foo: 'bar' } + }] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch({ operations: 1 }) + + try { + await m.search( + { index: 'test' }, + { query: { match: { foo: 'bar' } } } + ) + } catch (err: any) { + t.ok(err instanceof errors.ResponseError) + } + + t.teardown(() => m.stop()) +}) + +test('Entire 
msearch fails', t => { + t.plan(2) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + statusCode: 500, + body: { + status: 500, + error: { foo: 'bar' } + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch({ operations: 1 }) + + m.search({ index: 'test' }, { query: {} }) + .catch(err => { + t.ok(err instanceof errors.ResponseError) + }) + + m.search({ index: 'test' }, { query: {} }) + .catch(err => { + t.ok(err instanceof errors.ResponseError) + }) + + t.teardown(() => m.stop()) +}) + +test('Resolves the msearch helper', t => { + t.plan(1) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { body: {} } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch() + + m.stop() + + m.then( + () => t.pass('called'), + _e => t.fail('Should not fail') + ) + + m.catch(_e => t.fail('Should not fail')) +}) + +test('Stop the msearch helper with an error', t => { + t.plan(3) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { body: {} } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch() + + m.stop(new Error('kaboom')) + + m.then( + () => t.fail('Should fail'), + err => t.equal(err.message, 'kaboom') + ) + + m.catch(err => t.equal(err.message, 'kaboom')) + + m.search({ index: 'test' }, { query: {} }) + .catch(err => { + t.equal(err.message, 'kaboom') + }) +}) + +test('Multiple searches (concurrency = 1)', t => { + t.plan(4) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + responses: [{ + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch({ operations: 1, concurrency: 1 }) + + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) + + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) + }) + .catch(t.error) + + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.same(result.body, { + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }) + + t.same(result.documents, [ + { one: 'one' }, + { two: 'two' }, + { three: 'three' } + ]) + }) + .catch(t.error) + + t.teardown(() => m.stop()) +}) + +test('Flush interval', t => { + t.plan(2) + const clock = FakeTimers.install({ toFake: ['setTimeout', 'clearTimeout'], shouldClearNativeTimers: true }) + t.teardown(() => clock.uninstall()) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + responses: [{ + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }, { + status: 200, + hits: { + hits: [ + { _source: { four: 'four' } }, + { 
_source: { five: 'five' } }, + { _source: { six: 'six' } } + ] + } + }] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch() + + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.equal(result.documents.length, 3) + }) + + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.equal(result.documents.length, 3) + }) + + setImmediate(clock.next) + + t.teardown(() => m.stop()) +}) + +test('Flush interval - early stop', t => { + t.plan(2) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + responses: [{ + status: 200, + hits: { + hits: [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + }] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch() + + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .then(result => { + t.equal(result.documents.length, 3) + }) + + setImmediate(() => { + m.search({ index: 'test' }, { query: { match: { foo: 'bar' } } }) + .catch(err => { + t.ok(err instanceof errors.ConfigurationError) + }) + }) + + m.stop() +}) + +test('Stop should resolve the helper', t => { + t.plan(1) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + responses: [] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch() + setImmediate(m.stop) + + m.then(() => t.pass('Called')) + .catch(() => t.fail('Should not fail')) +}) + +test('Stop should resolve the helper (error)', t => { + t.plan(3) + + const MockConnection = connection.buildMockConnection({ + onRequest (_params) { + return { + body: { + responses: [] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch() + setImmediate(m.stop, new Error('kaboom')) + + m.then(() => t.fail('Should not fail')) + .catch(err => t.equal(err.message, 'kaboom')) + + m.catch(err => t.equal(err.message, 'kaboom')) + + m.then(() => t.fail('Should not fail'), err => t.equal(err.message, 'kaboom')) +}) + +test('Should use req options', async t => { + t.plan(1) + + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.match(params.headers, { + foo: 'bar' + }) + + return { + body: { + responses: [{ + status: 200, + hits: { hits: [] } + }] + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const m = client.helpers.msearch({ operations: 1 }, { + headers: { + foo: 'bar' + } + }) + + await m.search( + { index: 'test' }, + { query: { match: { foo: 'bar' } } } + ) + + t.teardown(() => m.stop()) +}) diff --git a/test/unit/helpers/scroll.test.js b/test/unit/helpers/scroll.test.js deleted file mode 100644 index 5571265de..000000000 --- a/test/unit/helpers/scroll.test.js +++ /dev/null @@ -1,273 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { Client, errors } = require('../../../') -const { connection } = require('../../utils') - -test('Scroll search', async t => { - var count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.querystring, 'scroll=1m') - return { - body: { - _scroll_id: count === 3 ? undefined : 'id', - count, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const scrollSearch = client.helpers.scrollSearch({ - index: 'test', - body: { foo: 'bar' } - }) - - for await (const result of scrollSearch) { - t.strictEqual(result.body.count, count) - if (count < 3) { - t.strictEqual(result.body._scroll_id, 'id') - } else { - t.strictEqual(result.body._scroll_id, undefined) - } - count += 1 - } -}) - -test('Clear a scroll search', async t => { - var count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - if (params.method === 'DELETE') { - const body = JSON.parse(params.body) - t.strictEqual(body.scroll_id, 'id') - } - return { - body: { - _scroll_id: count === 3 ? undefined : 'id', - count, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const scrollSearch = client.helpers.scrollSearch({ - index: 'test', - body: { foo: 'bar' } - }) - - for await (const result of scrollSearch) { - if (count === 2) { - t.fail('The scroll search should be cleared') - } - t.strictEqual(result.body.count, count) - if (count === 1) { - await result.clear() - } - count += 1 - } -}) - -test('Scroll search (retry)', async t => { - var count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - if (count === 1) { - count += 1 - return { body: {}, statusCode: 429 } - } - return { - statusCode: 200, - body: { - _scroll_id: count === 4 ? 
undefined : 'id', - count, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const scrollSearch = client.helpers.scrollSearch({ - index: 'test', - body: { foo: 'bar' } - }, { - wait: 10 - }) - - for await (const result of scrollSearch) { - t.strictEqual(result.body.count, count) - t.notStrictEqual(result.body.count, 1) - if (count < 4) { - t.strictEqual(result.body._scroll_id, 'id') - } else { - t.strictEqual(result.body._scroll_id, undefined) - } - count += 1 - } -}) - -test('Scroll search (retry throws and maxRetries)', async t => { - var count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - count += 1 - return { body: {}, statusCode: 429 } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection, - maxRetries: 5 - }) - - const scrollSearch = client.helpers.scrollSearch({ - index: 'test', - body: { foo: 'bar' } - }, { - wait: 10, - ignore: [404] - }) - - try { - for await (const result of scrollSearch) { // eslint-disable-line - t.fail('we should not be here') - } - } catch (err) { - t.true(err instanceof errors.ResponseError) - t.strictEqual(err.statusCode, 429) - t.strictEqual(count, 5) - } -}) - -test('Scroll search (retry throws later)', async t => { - var count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - // filter_path should not be added if is not already present - t.strictEqual(params.querystring, 'scroll=1m') - if (count > 1) { - count += 1 - return { body: {}, statusCode: 429 } - } - return { - statusCode: 200, - body: { - _scroll_id: count === 4 ? undefined : 'id', - count, - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const scrollSearch = client.helpers.scrollSearch({ - index: 'test', - body: { foo: 'bar' } - }, { - wait: 10 - }) - - try { - for await (const result of scrollSearch) { // eslint-disable-line - t.strictEqual(result.body.count, count) - count += 1 - } - } catch (err) { - t.true(err instanceof errors.ResponseError) - t.strictEqual(err.statusCode, 429) - t.strictEqual(count, 5) - } -}) - -test('Scroll search documents', async t => { - var count = 0 - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.querystring, 'filter_path=hits.hits._source%2C_scroll_id&scroll=1m') - return { - body: { - _scroll_id: count === 3 ? 
undefined : 'id', - count, - hits: { - hits: [ - { _source: { val: 1 * count } }, - { _source: { val: 2 * count } }, - { _source: { val: 3 * count } } - ] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const scrollSearch = client.helpers.scrollDocuments({ - index: 'test', - body: { foo: 'bar' } - }) - - let n = 1 - for await (const hit of scrollSearch) { - t.deepEqual(hit, { val: n * count }) - n += 1 - if (n === 4) { - count += 1 - n = 1 - } - } -}) diff --git a/test/unit/helpers/scroll.test.ts b/test/unit/helpers/scroll.test.ts new file mode 100644 index 000000000..ae01989a5 --- /dev/null +++ b/test/unit/helpers/scroll.test.ts @@ -0,0 +1,411 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import { test } from 'tap' +import { Client, errors } from '../../../' +import { connection } from '../../utils' + +let clientVersion: string = require('../../../package.json').version // eslint-disable-line +if (clientVersion.includes('-')) { + clientVersion = clientVersion.slice(0, clientVersion.indexOf('-')) + 'p' +} +let transportVersion: string = require('@elastic/transport/package.json').version // eslint-disable-line +if (transportVersion.includes('-')) { + transportVersion = transportVersion.slice(0, transportVersion.indexOf('-')) + 'p' +} +const nodeVersion = process.versions.node + +test('Scroll search', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.match(params.headers, { + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=s` + }) + + count += 1 + if (params.method === 'POST') { + if (params.path === '/test/_search') { + t.equal(params.querystring, 'scroll=1m') + } else { + // @ts-expect-error + t.equal(JSON.parse(params.body).scroll, '1m') + } + } + if (count === 4) { + // final automated clear + t.equal(params.method, 'DELETE') + } + return { + body: { + _scroll_id: 'id', + count, + hits: { + hits: count === 3 + ? [] + : [ + { _source: { one: 'one' } }, + { _source: { two: 'two' } }, + { _source: { three: 'three' } } + ] + } + } + } + } + }) + + const client = new Client({ + node: 'http://localhost:9200/', + Connection: MockConnection + }) + + const scrollSearch = client.helpers.scrollSearch({ + index: 'test', + query: { match_all: {} } + }) + + for await (const result of scrollSearch) { + // @ts-expect-error + t.equal(result.body.count, count) + t.equal(result.body._scroll_id, 'id') + } +}) + +test('Clear a scroll search', async t => { + let count = 0 + const MockConnection = connection.buildMockConnection({ + onRequest (params) { + t.notMatch(params.headers, { + 'x-elastic-client-meta': `es=${clientVersion},js=${nodeVersion},t=${transportVersion},hc=${nodeVersion},h=s` + }) + if (params.method === 'DELETE') { + // @ts-expect-error + const body = JSON.parse(params.body) + t.equal(body.scroll_id, 'id') + } + return { + body: { + _scroll_id: count === 3 ? 
undefined : 'id',
+          count,
+          hits: {
+            hits: [
+              { _source: { one: 'one' } },
+              { _source: { two: 'two' } },
+              { _source: { three: 'three' } }
+            ]
+          }
+        }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection,
+    enableMetaHeader: false
+  })
+
+  const scrollSearch = client.helpers.scrollSearch({
+    index: 'test',
+    query: { match_all: {} }
+  })
+
+  for await (const result of scrollSearch) {
+    if (count === 2) {
+      t.fail('The scroll search should be cleared')
+    }
+    // @ts-expect-error
+    t.equal(result.body.count, count)
+    if (count === 1) {
+      await result.clear()
+    }
+    count += 1
+  }
+})
+
+test('Scroll search (retry)', async t => {
+  let count = 0
+  const MockConnection = connection.buildMockConnection({
+    onRequest (params) {
+      count += 1
+      if (count === 1) {
+        return { body: {}, statusCode: 429 }
+      }
+      if (count === 5) {
+        // final automated clear
+        t.equal(params.method, 'DELETE')
+      }
+      return {
+        statusCode: 200,
+        body: {
+          _scroll_id: 'id',
+          count,
+          hits: {
+            hits: count === 4
+              ? []
+              : [
+                  { _source: { one: 'one' } },
+                  { _source: { two: 'two' } },
+                  { _source: { three: 'three' } }
+                ]
+          }
+        }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection
+  })
+
+  const scrollSearch = client.helpers.scrollSearch({
+    index: 'test',
+    query: { match_all: {} }
+  }, {
+    wait: 10
+  })
+
+  for await (const result of scrollSearch) {
+    // @ts-expect-error
+    t.equal(result.body.count, count)
+    // @ts-expect-error
+    t.not(result.body.count, 1)
+    t.equal(result.body._scroll_id, 'id')
+  }
+})
+
+test('Scroll search (retry throws and maxRetries)', async t => {
+  const maxRetries = 5
+  const expectedAttempts = maxRetries + 1
+  let count = 0
+  const MockConnection = connection.buildMockConnection({
+    onRequest (_params) {
+      count += 1
+      return { body: {}, statusCode: 429 }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection,
+    maxRetries
+  })
+
+  const scrollSearch = client.helpers.scrollSearch({
+    index: 'test',
+    query: { match_all: {} }
+  }, {
+    wait: 10,
+    ignore: [404]
+  })
+
+  try {
+    for await (const _result of scrollSearch) { // eslint-disable-line
+      t.fail('we should not be here')
+    }
+  } catch (err: any) {
+    t.ok(err instanceof errors.ResponseError)
+    t.equal(err.statusCode, 429)
+    t.equal(count, expectedAttempts)
+  }
+})
+
+test('Scroll search (retry throws later)', async t => {
+  const maxRetries = 5
+  const expectedAttempts = maxRetries + 2
+  let count = 0
+  const MockConnection = connection.buildMockConnection({
+    onRequest (params) {
+      count += 1
+      // filter_path should not be added if it is not already present
+      if (params.method === 'POST') {
+        if (params.path === '/test/_search') {
+          t.equal(params.querystring, 'scroll=1m')
+        } else {
+          // @ts-expect-error
+          t.equal(JSON.parse(params.body).scroll, '1m')
+        }
+      }
+      if (count > 1) {
+        return { body: {}, statusCode: 429 }
+      }
+      return {
+        statusCode: 200,
+        body: {
+          _scroll_id: 'id',
+          count,
+          hits: {
+            hits: [
+              { _source: { one: 'one' } },
+              { _source: { two: 'two' } },
+              { _source: { three: 'three' } }
+            ]
+          }
+        }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection,
+    maxRetries
+  })
+
+  const scrollSearch = client.helpers.scrollSearch({
+    index: 'test',
+    query: { match_all: {} }
+  }, {
+    wait: 10
+  })
+
+  try {
+    for await (const result of scrollSearch) { // eslint-disable-line
+      // @ts-expect-error
+      t.equal(result.body.count, count)
+    }
+  } catch (err: any) {
+    t.ok(err instanceof errors.ResponseError)
+    t.equal(err.statusCode, 429)
+    t.equal(count, expectedAttempts)
+  }
+})
+
+test('Scroll search documents', async t => {
+  let count = 0
+  const MockConnection = connection.buildMockConnection({
+    onRequest (params) {
+      if (count === 0) {
+        t.equal(params.querystring, 'filter_path=hits.hits._source%2C_scroll_id&scroll=1m')
+      } else {
+        if (params.method !== 'DELETE') {
+          t.equal(params.body, '{"scroll":"1m","scroll_id":"id"}')
+        }
+      }
+      return {
+        body: {
+          _scroll_id: 'id',
+          count,
+          hits: {
+            hits: count === 3
+              ? []
+              : [
+                  { _source: { val: 1 * count } },
+                  { _source: { val: 2 * count } },
+                  { _source: { val: 3 * count } }
+                ]
+          }
+        }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection
+  })
+
+  const scrollSearch = client.helpers.scrollDocuments({
+    index: 'test',
+    query: { match_all: {} }
+  })
+
+  let n = 1
+  for await (const hit of scrollSearch) {
+    t.same(hit, { val: n * count })
+    n += 1
+    if (n === 4) {
+      count += 1
+      n = 1
+    }
+  }
+})
+
+test('Should not retry if maxRetries = 0', async t => {
+  const maxRetries = 0
+  const expectedAttempts = 1
+  let count = 0
+  const MockConnection = connection.buildMockConnection({
+    onRequest (_params) {
+      count += 1
+      return { body: {}, statusCode: 429 }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection,
+    maxRetries
+  })
+
+  const scrollSearch = client.helpers.scrollSearch({
+    index: 'test',
+    query: { match_all: {} }
+  }, {
+    wait: 10,
+    ignore: [404]
+  })
+
+  try {
+    for await (const _result of scrollSearch) { // eslint-disable-line
+      t.fail('we should not be here')
+    }
+  } catch (err: any) {
+    t.ok(err instanceof errors.ResponseError)
+    t.equal(err.statusCode, 429)
+    t.equal(count, expectedAttempts)
+  }
+})
+
+test('Fix querystring for scroll search', async t => {
+  let count = 0
+  const MockConnection = connection.buildMockConnection({
+    onRequest (params) {
+      if (count === 0) {
+        t.equal(params.querystring, 'scroll=1m')
+      } else {
+        if (params.method !== 'DELETE') {
+          if (params.method === 'POST') {
+            if (params.path === '/test/_search') {
+              t.equal(params.querystring, 'scroll=1m')
+            } else {
+              // @ts-expect-error
+              t.equal(JSON.parse(params.body).scroll, '1m')
+            }
+          }
+        }
+      }
+      return {
+        body: {
+          _scroll_id: 'id',
+          hits: {
+            hits: count === 3
+              ? []
+              : [
+                  { _source: { val: count } }
+                ]
+          }
+        }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection
+  })
+
+  const scrollSearch = client.helpers.scrollSearch({
+    index: 'test',
+    size: 1,
+    query: { match_all: {} }
+  })
+
+  for await (const response of scrollSearch) {
+    t.equal(response.body.hits.hits.length, 1)
+    count += 1
+  }
+})
diff --git a/test/unit/helpers/search.test.js b/test/unit/helpers/search.test.js
deleted file mode 100644
index e1edbd48d..000000000
--- a/test/unit/helpers/search.test.js
+++ /dev/null
@@ -1,133 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { Client } = require('../../../') -const { connection } = require('../../utils') - -test('Search should have an additional documents property', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.querystring, 'filter_path=hits.hits._source') - return { - body: { - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const result = await client.helpers.search({ - index: 'test', - body: { foo: 'bar' } - }) - t.deepEqual(result, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) -}) - -test('kGetHits fallback', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.querystring, 'filter_path=hits.hits._source') - return { body: {} } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const result = await client.helpers.search({ - index: 'test', - body: { foo: 'bar' } - }) - t.deepEqual(result, []) -}) - -test('Merge filter paths (snake_case)', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.querystring, 'filter_path=foo%2Chits.hits._source') - return { - body: { - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const result = await client.helpers.search({ - index: 'test', - filter_path: 'foo', - body: { foo: 'bar' } - }) - t.deepEqual(result, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) -}) - -test('Merge filter paths (camelCase)', async t => { - const MockConnection = connection.buildMockConnection({ - onRequest (params) { - t.strictEqual(params.querystring, 'filter_path=foo%2Chits.hits._source') - return { - body: { - hits: { - hits: [ - { _source: { one: 'one' } }, - { _source: { two: 'two' } }, - { _source: { three: 'three' } } - ] - } - } - } - } - }) - - const client = new Client({ - node: 'http://localhost:9200/', - Connection: MockConnection - }) - - const result = await client.helpers.search({ - index: 'test', - filterPath: 'foo', - body: { foo: 'bar' } - }) - t.deepEqual(result, [ - { one: 'one' }, - { two: 'two' }, - { three: 'three' } - ]) -}) diff --git a/test/unit/helpers/search.test.ts b/test/unit/helpers/search.test.ts new file mode 100644 index 000000000..8eddde16a --- /dev/null +++ b/test/unit/helpers/search.test.ts @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. 
and contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { test } from 'tap'
+import { Client } from '../../../'
+import { connection } from '../../utils'
+
+test('Search should have an additional documents property', async t => {
+  const MockConnection = connection.buildMockConnection({
+    onRequest (params) {
+      t.equal(params.querystring, 'filter_path=hits.hits._id%2Chits.hits._source')
+      return {
+        body: {
+          hits: {
+            hits: [
+              { _id: '1', _source: { one: 'one' } },
+              { _id: '2', _source: { two: 'two' } },
+              { _id: '3', _source: { three: 'three' } }
+            ]
+          }
+        }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection
+  })
+
+  const result = await client.helpers.search({
+    index: 'test',
+    query: { match_all: {} }
+  })
+  t.same(result, [
+    { _id: '1', one: 'one' },
+    { _id: '2', two: 'two' },
+    { _id: '3', three: 'three' }
+  ])
+})
+
+test('kGetHits fallback', async t => {
+  const MockConnection = connection.buildMockConnection({
+    onRequest (params) {
+      t.equal(params.querystring, 'filter_path=hits.hits._id%2Chits.hits._source')
+      return { body: {} }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection
+  })
+
+  const result = await client.helpers.search({
+    index: 'test',
+    query: { match_all: {} }
+  })
+  t.same(result, [])
+})
+
+test('Merge filter paths (snake_case)', async t => {
+  const MockConnection = connection.buildMockConnection({
+    onRequest (params) {
+      t.equal(params.querystring, 'filter_path=foo%2Chits.hits._id%2Chits.hits._source')
+      return {
+        body: {
+          hits: {
+            hits: [
+              { _id: '1', _source: { one: 'one' } },
+              { _id: '2', _source: { two: 'two' } },
+              { _id: '3', _source: { three: 'three' } }
+            ]
+          }
+        }
+      }
+    }
+  })
+
+  const client = new Client({
+    node: 'http://localhost:9200/',
+    Connection: MockConnection
+  })
+
+  const result = await client.helpers.search({
+    index: 'test',
+    filter_path: 'foo',
+    query: { match_all: {} }
+  })
+  t.same(result, [
+    { _id: '1', one: 'one' },
+    { _id: '2', two: 'two' },
+    { _id: '3', three: 'three' }
+  ])
+})
diff --git a/test/unit/selectors.test.js b/test/unit/selectors.test.js
deleted file mode 100644
index 2cfd1687f..000000000
--- a/test/unit/selectors.test.js
+++ /dev/null
@@ -1,27 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information
-
-'use strict'
-
-const { test } = require('tap')
-const { roundRobinSelector, randomSelector } = require('../../lib/Transport').internals
-
-test('RoundRobinSelector', t => {
-  const selector = roundRobinSelector()
-  const arr = [0, 1, 2, 3, 4, 5]
-
-  t.plan(arr.length + 1)
-  for (var i = 0; i <= arr.length; i++) {
-    t.strictEqual(
-      selector(arr),
-      i === arr.length ? arr[0] : arr[i]
-    )
-  }
-})
-
-test('RandomSelector', t => {
-  t.plan(1)
-  const arr = [0, 1, 2, 3, 4, 5]
-  t.type(randomSelector(arr), 'number')
-})
diff --git a/test/unit/serializer.test.js b/test/unit/serializer.test.js
deleted file mode 100644
index 329762785..000000000
--- a/test/unit/serializer.test.js
+++ /dev/null
@@ -1,144 +0,0 @@
-// Licensed to Elasticsearch B.V under one or more agreements.
-// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { stringify } = require('querystring') -const Serializer = require('../../lib/Serializer') -const { SerializationError, DeserializationError } = require('../../lib/errors') - -test('Basic', t => { - t.plan(2) - const s = new Serializer() - const obj = { hello: 'world' } - const json = JSON.stringify(obj) - t.strictEqual(s.serialize(obj), json) - t.deepEqual(s.deserialize(json), obj) -}) - -test('ndserialize', t => { - t.plan(1) - const s = new Serializer() - const obj = [ - { hello: 'world' }, - { winter: 'is coming' }, - { you_know: 'for search' } - ] - t.strictEqual( - s.ndserialize(obj), - JSON.stringify(obj[0]) + '\n' + - JSON.stringify(obj[1]) + '\n' + - JSON.stringify(obj[2]) + '\n' - ) -}) - -test('ndserialize (strings)', t => { - t.plan(1) - const s = new Serializer() - const obj = [ - JSON.stringify({ hello: 'world' }), - JSON.stringify({ winter: 'is coming' }), - JSON.stringify({ you_know: 'for search' }) - ] - t.strictEqual( - s.ndserialize(obj), - obj[0] + '\n' + - obj[1] + '\n' + - obj[2] + '\n' - ) -}) - -test('qserialize', t => { - t.plan(1) - const s = new Serializer() - const obj = { - hello: 'world', - you_know: 'for search' - } - - t.strictEqual( - s.qserialize(obj), - stringify(obj) - ) -}) - -test('qserialize (array)', t => { - t.plan(1) - const s = new Serializer() - const obj = { - hello: 'world', - arr: ['foo', 'bar'] - } - - t.strictEqual( - s.qserialize(obj), - 'hello=world&arr=foo%2Cbar' - ) -}) - -test('qserialize (string)', t => { - t.plan(1) - const s = new Serializer() - const obj = { - hello: 'world', - you_know: 'for search' - } - - t.strictEqual( - s.qserialize(stringify(obj)), - stringify(obj) - ) -}) - -test('qserialize (key with undefined value)', t => { - t.plan(1) - const s = new Serializer() - const obj = { - hello: 'world', - key: undefined, - foo: 'bar' - } - - t.strictEqual( - s.qserialize(obj), - 'hello=world&foo=bar' - ) -}) - -test('SerializationError', t => { - t.plan(1) - const s = new Serializer() - const obj = { hello: 'world' } - obj.o = obj - try { - s.serialize(obj) - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof SerializationError) - } -}) - -test('SerializationError ndserialize', t => { - t.plan(1) - const s = new Serializer() - try { - s.ndserialize({ hello: 'world' }) - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof SerializationError) - } -}) - -test('DeserializationError', t => { - t.plan(1) - const s = new Serializer() - const json = '{"hello' - try { - s.deserialize(json) - t.fail('Should fail') - } catch (err) { - t.ok(err instanceof DeserializationError) - } -}) diff --git a/test/unit/transport.test.js b/test/unit/transport.test.js deleted file mode 100644 index e3875b291..000000000 --- a/test/unit/transport.test.js +++ /dev/null @@ -1,2447 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
-// See the LICENSE file in the project root for more information - -'use strict' - -const { test } = require('tap') -const { URL } = require('url') -const lolex = require('lolex') -const { createGunzip } = require('zlib') -const os = require('os') -const intoStream = require('into-stream') -const { - buildServer, - connection: { MockConnection, MockConnectionTimeout, MockConnectionError } -} = require('../utils') -const { - NoLivingConnectionsError, - SerializationError, - DeserializationError, - TimeoutError, - ResponseError, - ConnectionError, - ConfigurationError, - RequestAbortedError -} = require('../../lib/errors') - -const ConnectionPool = require('../../lib/pool/ConnectionPool') -const Connection = require('../../lib/Connection') -const Serializer = require('../../lib/Serializer') -const Transport = require('../../lib/Transport') - -test('Basic', t => { - t.plan(2) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Basic (promises support)', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport - .request({ - method: 'GET', - path: '/hello' - }) - .then(({ body }) => { - t.deepEqual(body, { hello: 'world' }) - }) - .catch(t.fail) -}) - -test('Basic - failing (promises support)', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnectionTimeout }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport - .request({ - method: 'GET', - path: '/hello' - }) - .catch(err => { - t.ok(err instanceof TimeoutError) - }) -}) - -test('Basic (options + promises support)', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport - .request({ - method: 'GET', - path: '/hello' - }, { - requestTimeout: 1000 - }) - .then(({ body }) => { - t.deepEqual(body, { hello: 'world' }) - }) - .catch(t.fail) -}) - -test('Send POST', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 'content-length': '17' - }) - var json = '' - req.setEncoding('utf8') - req.on('data', chunk => { json += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.deepEqual(JSON.parse(json), { hello: 
'world' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'POST', - path: '/hello', - body: { hello: 'world' } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Send POST (ndjson)', t => { - t.plan(4) - - const bulkBody = [ - { hello: 'world' }, - { winter: 'is coming' }, - { you_know: 'for search' } - ] - - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/x-ndjson', - 'content-length': '67' - }) - var json = '' - req.setEncoding('utf8') - req.on('data', chunk => { json += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.strictEqual( - json, - JSON.stringify(bulkBody[0]) + '\n' + - JSON.stringify(bulkBody[1]) + '\n' + - JSON.stringify(bulkBody[2]) + '\n' - ) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'POST', - path: '/hello', - bulkBody - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Send stream', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json' - }) - var json = '' - req.setEncoding('utf8') - req.on('data', chunk => { json += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.deepEqual(JSON.parse(json), { hello: 'world' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'POST', - path: '/hello', - body: intoStream(JSON.stringify({ hello: 'world' })) - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Send stream (bulkBody)', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/x-ndjson' - }) - var json = '' - req.setEncoding('utf8') - req.on('data', chunk => { json += chunk }) - req.on('error', err => t.fail(err)) - req.on('end', () => { - t.deepEqual(JSON.parse(json), { hello: 'world' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - 
- const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'POST', - path: '/hello', - bulkBody: intoStream(JSON.stringify({ hello: 'world' })) - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Not JSON payload from server', t => { - t.plan(2) - function handler (req, res) { - res.setHeader('Content-Type', 'text/plain') - res.end('hello!') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.strictEqual(body, 'hello!') - server.stop() - }) - }) -}) - -test('NoLivingConnectionsError (null connection)', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - nodeSelector (connections) { - t.is(connections.length, 1) - t.true(connections[0] instanceof Connection) - return null - } - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof NoLivingConnectionsError) - }) -}) - -test('NoLivingConnectionsError (undefined connection)', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - nodeSelector (connections) { - t.is(connections.length, 1) - t.true(connections[0] instanceof Connection) - return undefined - } - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof NoLivingConnectionsError) - }) -}) - -test('SerializationError', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - const body = { hello: 'world' } - body.o = body - transport.request({ - method: 'POST', - path: '/hello', - body - }, (err, { body }) => { - t.ok(err instanceof SerializationError) - }) -}) - -test('SerializationError (bulk)', t => { - t.plan(1) - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - const bulkBody = { hello: 'world' } - bulkBody.o = bulkBody - transport.request({ - method: 'POST', - path: '/hello', - bulkBody - }, (err, { body }) => { - t.ok(err instanceof SerializationError) - }) -}) - 
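// NOTE: the deleted suites above and below pin down serializer behavior for the
// legacy transport: a payload that cannot be serialized must fail fast, before
// any bytes reach the wire. The same guarantee holds in the 8.x client. A
// minimal sketch, assuming only the `client.index({ index, document })`
// signature and the `errors` module that this PR's new tests already import:

import { Client, errors } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function indexDocument (doc: Record<string, unknown>): Promise<void> {
  try {
    await client.index({ index: 'test', document: doc })
  } catch (err) {
    // A circular document is rejected by the serializer before the request
    // is sent, so it surfaces as a SerializationError, not a network error.
    if (err instanceof errors.SerializationError) {
      console.error('document is not JSON-serializable')
      return
    }
    throw err
  }
}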
-test('DeserializationError', t => { - t.plan(1) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end('{"hello)') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof DeserializationError) - server.stop() - }) - }) -}) - -test('TimeoutError (should call markDead on the failing connection)', t => { - t.plan(2) - - class CustomConnectionPool extends ConnectionPool { - markDead (connection) { - t.strictEqual(connection.id, 'node1') - super.markDead(connection) - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnectionTimeout }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 500, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - }) -}) - -test('ConnectionError (should call markDead on the failing connection)', t => { - t.plan(2) - - class CustomConnectionPool extends ConnectionPool { - markDead (connection) { - t.strictEqual(connection.id, 'node1') - super.markDead(connection) - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnectionError }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof ConnectionError) - }) -}) - -test('Retry mechanism', t => { - t.plan(2) - - var count = 0 - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - if (count > 0) { - res.end(JSON.stringify({ hello: 'world' })) - } else { - res.statusCode = 504 - res.end(JSON.stringify({ error: true })) - } - count++ - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 1, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Should not retry if the body is a stream', t => { - t.plan(2) - - var count = 0 - function handler (req, res) { - count++ - res.setHeader('Content-Type', 'application/json;utf=8') - res.statusCode = 504 - res.end(JSON.stringify({ error: true })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - 
pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 1, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'POST', - path: '/hello', - body: intoStream(JSON.stringify({ hello: 'world' })) - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - t.strictEqual(count, 1) - server.stop() - }) - }) -}) - -test('Custom retry mechanism', t => { - t.plan(2) - - var count = 0 - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - if (count > 0) { - res.end(JSON.stringify({ hello: 'world' })) - } else { - res.statusCode = 504 - res.end(JSON.stringify({ error: true })) - } - count++ - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - maxRetries: 1 - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('Should not retry on 429', t => { - t.plan(3) - - var count = 0 - function handler (req, res) { - t.strictEqual(count++, 0) - res.statusCode = 429 - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 5, - requestTimeout: 250, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, result) => { - t.ok(err) - t.strictEqual(err.statusCode, 429) - server.stop() - }) - }) -}) - -test('Should call markAlive with a successful response', t => { - t.plan(3) - - class CustomConnectionPool extends ConnectionPool { - markAlive (connection) { - t.strictEqual(connection.id, 'node1') - super.markAlive(connection) - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - }) -}) - -test('Should call resurrect on every request', t => { - t.plan(5) - - class CustomConnectionPool extends ConnectionPool { - resurrect ({ now, requestId, name }) { - t.type(now, 'number') - t.type(requestId, 
'number') - t.type(name, 'string') - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - name: 'elasticsearch-js' - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - }) -}) - -test('Should return a request aborter utility', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - const request = transport.request({ - method: 'GET', - path: '/hello' - }, (err, result) => { - t.ok(err instanceof RequestAbortedError) - }) - - request.abort() -}) - -test('Retry mechanism and abort', t => { - t.plan(1) - - function handler (req, res) { - setTimeout(() => { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }, 1000) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection([{ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node2' - }, { - url: new URL(`http://localhost:${port}`), - id: 'node3' - }]) - - var count = 0 - const transport = new Transport({ - emit: event => { - if (event === 'request' && count++ > 0) { - request.abort() - } - }, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 2, - requestTimeout: 100, - sniffInterval: false, - sniffOnStart: false - }) - - const request = transport.request({ - method: 'GET', - path: '/hello' - }, (err, result) => { - t.ok(err instanceof RequestAbortedError) - server.stop() - }) - }) -}) - -test('Abort a request with the promise API', t => { - t.plan(1) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection({ - url: new URL('http://localhost:9200/'), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - const request = transport.request({ - method: 'GET', - path: '/hello' - }) - - request - .then(() => { - t.fail('Should not be called') - }) - .catch(err => { - t.ok(err instanceof RequestAbortedError) - }) - - request.abort() -}) - -test('ResponseError', t => { - t.plan(3) - - function handler (req, res) { - res.statusCode = 500 - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ status: 500 })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof ResponseError) 
- t.deepEqual(err.body, { status: 500 }) - t.strictEqual(err.statusCode, 500) - server.stop() - }) - }) -}) - -test('Override requestTimeout', t => { - t.plan(2) - function handler (req, res) { - setTimeout(() => { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }, 1000) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 500, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - requestTimeout: 2000 - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) -}) - -test('sniff', t => { - t.test('sniffOnStart', t => { - t.plan(1) - - class MyTransport extends Transport { - sniff (opts) { - t.strictEqual(opts.reason, Transport.sniffReasons.SNIFF_ON_START) - } - } - - const pool = new ConnectionPool({ Connection }) - pool.addConnection('http://localhost:9200/') - - // eslint-disable-next-line - new MyTransport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: true, - sniffEndpoint: '/sniff' - }) - }) - - t.test('sniffOnConnectionFault', t => { - t.plan(2) - - class MyTransport extends Transport { - sniff (opts) { - t.strictEqual(opts.reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT) - } - } - - const pool = new ConnectionPool({ Connection: MockConnectionTimeout }) - pool.addConnection('http://localhost:9200/') - - const transport = new MyTransport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 500, - sniffInterval: false, - sniffOnConnectionFault: true, - sniffEndpoint: '/sniff' - }) - - transport.request({ - method: 'GET', - path: '/' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - }) - }) - - t.test('sniffInterval', t => { - t.plan(6) - - const clock = lolex.install({ toFake: ['Date'] }) - t.teardown(() => clock.uninstall()) - - class MyTransport extends Transport { - sniff (opts) { - t.strictEqual(opts.reason, Transport.sniffReasons.SNIFF_INTERVAL) - } - } - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new MyTransport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 3000, - sniffInterval: 1, - sniffEndpoint: '/sniff' - }) - - const params = { method: 'GET', path: '/' } - clock.tick(100) - transport.request(params, t.error) - - clock.tick(200) - transport.request(params, t.error) - - clock.tick(300) - transport.request(params, t.error) - }) - - t.test('errored', t => { - t.plan(1) - - class CustomConnectionPool extends ConnectionPool { - nodesToHost () { - t.fail('This should not be called') - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnectionError }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 30000, - sniffInterval: false, - sniffEndpoint: '/sniff' - }) - - transport.sniff((err, hosts) => { - t.ok(err instanceof ConnectionError) - 
}) - }) - - t.end() -}) - -test(`Should mark as dead connections where the statusCode is 502/3/4 - and return a ResponseError if there are no more attempts`, t => { - ;[502, 503, 504].forEach(runTest) - - function runTest (statusCode) { - t.test(statusCode, t => { - t.plan(3) - - class CustomConnectionPool extends ConnectionPool { - markDead (connection) { - t.ok('called') - super.markDead(connection) - } - } - - const pool = new CustomConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: `/${statusCode}` - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - t.match(err, { - body: { hello: 'world' }, - headers: { 'content-type': 'application/json;utf=8' }, - statusCode: statusCode - }) - }) - }) - } - - t.end() -}) - -test('Should retry the request if the statusCode is 502/3/4', t => { - ;[502, 503, 504].forEach(runTest) - - function runTest (statusCode) { - t.test(statusCode, t => { - t.plan(3) - - var first = true - function handler (req, res) { - if (first) { - first = false - res.statusCode = statusCode - } - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - class CustomConnectionPool extends ConnectionPool { - markDead (connection) { - t.ok('called') - } - } - - buildServer(handler, ({ port }, server) => { - const pool = new CustomConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 1, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - } - - t.end() -}) - -test('Ignore status code', t => { - t.plan(4) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/404' - }, { - ignore: [404] - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - }) - - transport.request({ - method: 'GET', - path: '/404' - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - }) - - transport.request({ - method: 'GET', - path: '/404' - }, { - ignore: [403, 405] - }, (err, { body }) => { - t.ok(err instanceof ResponseError) - }) -}) - -test('Should serialize the querystring', t => { - t.plan(2) - - function handler (req, res) { - t.strictEqual(req.url, '/hello?hello=world&you_know=for%20search') - res.end('ok') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello', - querystring: { - hello: 
'world', - you_know: 'for search' - } - }, (err, { body }) => { - t.error(err) - server.stop() - }) - }) -}) - -test('timeout option', t => { - function handler (req, res) { - setTimeout(() => { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - }, 1000) - } - - t.test('as number', t => { - t.test('global', t => { - t.plan(1) - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 500, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) - }) - - t.test('custom', t => { - t.plan(1) - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - requestTimeout: 500 - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) - }) - - t.end() - }) - - t.test('as string', t => { - t.test('global', t => { - t.plan(1) - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: '0.5s', - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) - }) - - t.test('custom', t => { - t.plan(1) - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection({ - url: new URL(`http://localhost:${port}`), - id: 'node1' - }) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 0, - requestTimeout: '30s', - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - requestTimeout: '0.5s' - }, (err, { body }) => { - t.ok(err instanceof TimeoutError) - server.stop() - }) - }) - }) - - t.end() - }) - - t.end() -}) - -test('Should cast to boolean HEAD request', t => { - t.test('2xx response', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'HEAD', - path: '/200' - }, (err, { body, statusCode }) => { - t.error(err) - t.strictEqual(statusCode, 200) - t.strictEqual(body, true) - }) - }) - - t.test('404 response', t => { - t.plan(3) - const pool = new ConnectionPool({ Connection: MockConnection }) - 
pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'HEAD', - path: '/404' - }, (err, { body, statusCode }) => { - t.error(err) - t.strictEqual(statusCode, 404) - t.strictEqual(body, false) - }) - }) - - t.test('4xx response', t => { - t.plan(2) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'HEAD', - path: '/400' - }, (err, { body, statusCode }) => { - t.ok(err instanceof ResponseError) - t.strictEqual(statusCode, 400) - }) - }) - - t.test('5xx response', t => { - t.plan(2) - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'HEAD', - path: '/500' - }, (err, { body, statusCode }) => { - t.ok(err instanceof ResponseError) - t.strictEqual(statusCode, 500) - }) - }) - - t.end() -}) - -test('Suggest compression', t => { - t.plan(2) - function handler (req, res) { - t.match(req.headers, { - 'accept-encoding': 'gzip,deflate' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - suggestCompression: true - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Warning header', t => { - t.test('Single warning', t => { - t.plan(3) - - const warn = '112 - "cache down" "Wed, 21 Oct 2015 07:28:00 GMT"' - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.setHeader('Warning', warn) - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { warnings }) => { - t.error(err) - t.deepEqual(warnings, [warn]) - warnings.forEach(w => t.type(w, 'string')) - server.stop() - }) - }) - }) - - t.test('Multiple warnings', t => { - t.plan(4) - - const warn1 = '112 - "cache down" "Wed, 21 Oct 2015 07:28:00 GMT"' - const warn2 = '199 agent "Error message" "2015-01-01"' - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.setHeader('Warning', warn1 + ',' + warn2) - res.end(JSON.stringify({ 
hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { warnings }) => { - t.error(err) - t.deepEqual(warnings, [warn1, warn2]) - warnings.forEach(w => t.type(w, 'string')) - server.stop() - }) - }) - }) - - t.test('No warnings', t => { - t.plan(2) - - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { warnings }) => { - t.error(err) - t.strictEqual(warnings, null) - server.stop() - }) - }) - }) - - t.test('Multiple warnings and external warning', t => { - t.plan(5) - - const warn1 = '112 - "cache down" "Wed, 21 Oct 2015 07:28:00 GMT"' - const warn2 = '199 agent "Error message" "2015-01-01"' - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.setHeader('Warning', warn1 + ',' + warn2) - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - warnings: ['winter is coming'] - }, (err, { warnings }) => { - t.error(err) - t.deepEqual(warnings, ['winter is coming', warn1, warn2]) - warnings.forEach(w => t.type(w, 'string')) - server.stop() - }) - }) - }) - - t.end() -}) - -test('asStream set to true', t => { - t.plan(3) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - asStream: true - }, (err, { body, headers }) => { - t.error(err) - t.match(headers, { - connection: 'keep-alive', - 'content-type': 'application/json;utf=8' - }) - - var payload = '' - body.setEncoding('utf8') - body.on('data', chunk => { payload += chunk }) - body.on('error', err => t.fail(err)) - body.on('end', () => { - t.deepEqual(JSON.parse(payload), { hello: 'world' }) - server.stop() - }) - }) - }) -}) - -test('Compress request', t => { - t.test('gzip as request option', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 
'content-encoding': 'gzip' - }) - var json = '' - req - .pipe(createGunzip()) - .on('data', chunk => { json += chunk }) - .on('error', err => t.fail(err)) - .on('end', () => { - t.deepEqual(JSON.parse(json), { you_know: 'for search' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ you_know: 'for search' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'POST', - path: '/hello', - body: { you_know: 'for search' } - }, { - compression: 'gzip' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { you_know: 'for search' }) - server.stop() - }) - }) - }) - - t.test('gzip as transport option', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 'content-encoding': 'gzip' - }) - var json = '' - req - .pipe(createGunzip()) - .on('data', chunk => { json += chunk }) - .on('error', err => t.fail(err)) - .on('end', () => { - t.deepEqual(JSON.parse(json), { you_know: 'for search' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ you_know: 'for search' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - compression: 'gzip' - }) - - transport.request({ - method: 'POST', - path: '/hello', - body: { you_know: 'for search' } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { you_know: 'for search' }) - server.stop() - }) - }) - }) - - t.test('gzip stream body', t => { - t.plan(4) - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 'content-encoding': 'gzip' - }) - var json = '' - req - .pipe(createGunzip()) - .on('data', chunk => { json += chunk }) - .on('error', err => t.fail(err)) - .on('end', () => { - t.deepEqual(JSON.parse(json), { you_know: 'for search' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ you_know: 'for search' })) - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'POST', - path: '/hello', - body: intoStream(JSON.stringify({ you_know: 'for search' })) - }, { - compression: 'gzip' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { you_know: 'for search' }) - server.stop() - }) - }) - }) - - t.test('Should throw on invalid compression value', t => { - t.plan(2) - - try { - new Transport({ // eslint-disable-line - emit: () => {}, - connectionPool: new ConnectionPool({ Connection }), - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - compression: 'deflate' - }) - t.fail('Should 
throw') - } catch (err) { - t.true(err instanceof ConfigurationError) - t.is(err.message, 'Invalid compression: \'deflate\'') - } - }) - - t.test('Should skip the compression for empty strings/null/undefined', t => { - t.plan(9) - - function handler (req, res) { - t.strictEqual(req.headers['content-encoding'], undefined) - t.strictEqual(req.headers['content-type'], undefined) - res.end() - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - compression: 'gzip', - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'DELETE', - path: '/hello', - body: '' - }, (err, { body }) => { - t.error(err) - transport.request({ - method: 'GET', - path: '/hello', - body: null - }, (err, { body }) => { - t.error(err) - transport.request({ - method: 'GET', - path: '/hello', - body: undefined - }, (err, { body }) => { - t.error(err) - server.stop() - }) - }) - }) - }) - }) - - t.test('Retry a gzipped body', t => { - t.plan(7) - - var count = 0 - function handler (req, res) { - t.match(req.headers, { - 'content-type': 'application/json', - 'content-encoding': 'gzip' - }) - var json = '' - req - .pipe(createGunzip()) - .on('data', chunk => { json += chunk }) - .on('error', err => t.fail(err)) - .on('end', () => { - t.deepEqual(JSON.parse(json), { you_know: 'for search' }) - res.setHeader('Content-Type', 'application/json;utf=8') - if (count++ > 0) { - res.end(JSON.stringify({ you_know: 'for search' })) - } else { - setTimeout(() => { - res.end(JSON.stringify({ you_know: 'for search' })) - }, 1000) - } - }) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 250, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'POST', - path: '/hello', - body: { you_know: 'for search' } - }, { - compression: 'gzip' - }, (err, { body, meta }) => { - t.error(err) - t.deepEqual(body, { you_know: 'for search' }) - t.strictEqual(count, 2) - server.stop() - }) - }) - }) - - t.end() -}) - -test('Headers configuration', t => { - t.test('Global option', t => { - t.plan(3) - function handler (req, res) { - t.match(req.headers, { 'x-foo': 'bar' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - headers: { - 'x-foo': 'bar' - } - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Global option and custom option', t => { - t.plan(3) - function handler (req, res) { - t.match(req.headers, { - 'x-foo': 'bar', - 'x-baz': 'faz' - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - 
buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - headers: { - 'x-foo': 'bar' - } - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - headers: { 'x-baz': 'faz' } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Custom options should override global option', t => { - t.plan(3) - function handler (req, res) { - t.match(req.headers, { 'x-foo': 'faz' }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - headers: { - 'x-foo': 'bar' - } - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - headers: { 'x-foo': 'faz' } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.end() -}) - -test('nodeFilter and nodeSelector', t => { - t.plan(4) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - nodeFilter: () => { - t.ok('called') - return true - }, - nodeSelector: conns => { - t.ok('called') - return conns[0] - } - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - }) -}) - -test('Should accept custom querystring in the optons object', t => { - t.test('Options object', t => { - t.plan(3) - - function handler (req, res) { - t.strictEqual(req.url, '/hello?foo=bar') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, { - querystring: { foo: 'bar' } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.test('Options object and params', t => { - t.plan(3) - - function handler (req, res) { - t.strictEqual(req.url, '/hello?baz=faz&foo=bar') - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - 
sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello', - querystring: { baz: 'faz' } - }, { - querystring: { foo: 'bar' } - }, (err, { body }) => { - t.error(err) - t.deepEqual(body, { hello: 'world' }) - server.stop() - }) - }) - }) - - t.end() -}) - -test('Should add an User-Agent header', t => { - t.plan(2) - const clientVersion = require('../../package.json').version - const userAgent = `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${process.version})` - - function handler (req, res) { - t.match(req.headers, { - 'user-agent': userAgent - }) - res.setHeader('Content-Type', 'application/json;utf=8') - res.end(JSON.stringify({ hello: 'world' })) - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.error(err) - server.stop() - }) - }) -}) - -test('Should pass request params and options to generateRequestId', t => { - t.plan(3) - - const pool = new ConnectionPool({ Connection: MockConnection }) - pool.addConnection('http://localhost:9200/') - - const params = { method: 'GET', path: '/hello' } - const options = { context: { winter: 'is coming' } } - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false, - generateRequestId: function (requestParams, requestOptions) { - t.deepEqual(requestParams, params) - t.deepEqual(requestOptions, options) - return 'id' - } - }) - - transport.request(params, options, t.error) -}) - -test('Secure json parsing', t => { - t.test('__proto__ protection', t => { - t.plan(2) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end('{"__proto__":{"a":1}}') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.true(err instanceof DeserializationError) - t.is(err.message, 'Object contains forbidden prototype property') - server.stop() - }) - }) - }) - - t.test('constructor protection', t => { - t.plan(2) - function handler (req, res) { - res.setHeader('Content-Type', 'application/json;utf=8') - res.end('{"constructor":{"prototype":{"bar":"baz"}}}') - } - - buildServer(handler, ({ port }, server) => { - const pool = new ConnectionPool({ Connection }) - pool.addConnection(`http://localhost:${port}`) - - const transport = new Transport({ - emit: () => {}, - connectionPool: pool, - serializer: new Serializer(), - maxRetries: 3, - requestTimeout: 30000, - sniffInterval: false, - sniffOnStart: false - }) - - transport.request({ - method: 'GET', - path: '/hello' - }, (err, { body }) => { - t.true(err instanceof DeserializationError) - t.is(err.message, 'Object contains forbidden prototype property') - server.stop() - 
}) - }) - }) - - t.end() -}) - -test('Lowercase headers utilty', t => { - t.plan(4) - const { lowerCaseHeaders } = Transport.internals - - t.deepEqual(lowerCaseHeaders({ - Foo: 'bar', - Faz: 'baz', - 'X-Hello': 'world' - }), { - foo: 'bar', - faz: 'baz', - 'x-hello': 'world' - }) - - t.deepEqual(lowerCaseHeaders({ - Foo: 'bar', - faz: 'baz', - 'X-hello': 'world' - }), { - foo: 'bar', - faz: 'baz', - 'x-hello': 'world' - }) - - t.strictEqual(lowerCaseHeaders(null), null) - - t.strictEqual(lowerCaseHeaders(undefined), undefined) -}) diff --git a/test/utils/MockConnection.js b/test/utils/MockConnection.js deleted file mode 100644 index d6154654e..000000000 --- a/test/utils/MockConnection.js +++ /dev/null @@ -1,164 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const assert = require('assert') -const { Connection } = require('../../index') -const { - ConnectionError, - RequestAbortedError, - TimeoutError -} = require('../../lib/errors') -const intoStream = require('into-stream') - -class MockConnection extends Connection { - request (params, callback) { - var aborted = false - const stream = intoStream(JSON.stringify({ hello: 'world' })) - stream.statusCode = setStatusCode(params.path) - stream.headers = { - 'content-type': 'application/json;utf=8', - date: new Date().toISOString(), - connection: 'keep-alive', - 'content-length': '17' - } - process.nextTick(() => { - if (!aborted) { - callback(null, stream) - } else { - callback(new RequestAbortedError(), null) - } - }) - return { - abort: () => { aborted = true } - } - } -} - -class MockConnectionTimeout extends Connection { - request (params, callback) { - var aborted = false - process.nextTick(() => { - if (!aborted) { - callback(new TimeoutError('Request timed out', params), null) - } else { - callback(new RequestAbortedError(), null) - } - }) - return { - abort: () => { aborted = true } - } - } -} - -class MockConnectionError extends Connection { - request (params, callback) { - var aborted = false - process.nextTick(() => { - if (!aborted) { - callback(new ConnectionError('Kaboom'), null) - } else { - callback(new RequestAbortedError(), null) - } - }) - return { - abort: () => { aborted = true } - } - } -} - -class MockConnectionSniff extends Connection { - request (params, callback) { - var aborted = false - const sniffResult = { - nodes: { - 'node-1': { - http: { - publish_address: 'localhost:9200' - }, - roles: ['master', 'data', 'ingest'] - }, - 'node-2': { - http: { - publish_address: 'localhost:9201' - }, - roles: ['master', 'data', 'ingest'] - } - } - } - const stream = intoStream(JSON.stringify(sniffResult)) - stream.statusCode = setStatusCode(params.path) - stream.headers = { - 'content-type': 'application/json;utf=8', - date: new Date().toISOString(), - connection: 'keep-alive', - 'content-length': '205' - } - process.nextTick(() => { - if (!aborted) { - if (params.headers.timeout) { - callback(new TimeoutError('Request timed out', params), null) - } else { - callback(null, stream) - } - } else { - callback(new RequestAbortedError(), null) - } - }) - return { - abort: () => { aborted = true } - } - } -} - -function buildMockConnection (opts) { - assert(opts.onRequest, 'Missing required onRequest option') - - class MockConnection extends Connection { - request (params, callback) { - var { body, statusCode } = opts.onRequest(params) - if (typeof 
body !== 'string') { - body = JSON.stringify(body) - } - var aborted = false - const stream = intoStream(body) - stream.statusCode = statusCode || 200 - stream.headers = { - 'content-type': 'application/json;utf=8', - date: new Date().toISOString(), - connection: 'keep-alive', - 'content-length': Buffer.byteLength(body) - } - process.nextTick(() => { - if (!aborted) { - callback(null, stream) - } else { - callback(new RequestAbortedError(), null) - } - }) - return { - abort: () => { aborted = true } - } - } - } - - return MockConnection -} - -function setStatusCode (path) { - const statusCode = Number(path.slice(1)) - if (Number.isInteger(statusCode)) { - return statusCode - } - return 200 -} - -module.exports = { - MockConnection, - MockConnectionTimeout, - MockConnectionError, - MockConnectionSniff, - buildMockConnection -} diff --git a/test/utils/MockConnection.ts b/test/utils/MockConnection.ts new file mode 100644 index 000000000..c1bd25873 --- /dev/null +++ b/test/utils/MockConnection.ts @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import assert from 'assert' +import * as http from 'http' +import { + BaseConnection, + ConnectionRequestParams, + ConnectionRequestOptions, + ConnectionRequestResponse, + errors, + ConnectionRequestOptionsAsStream, + ConnectionRequestResponseAsStream +} from '@elastic/transport' +const { + ConnectionError, + TimeoutError +} = errors + +export class MockConnection extends BaseConnection { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise<ConnectionRequestResponseAsStream> + async request (params: ConnectionRequestParams, options: any): Promise<any> { + return new Promise((resolve, reject) => { + const body = JSON.stringify({ hello: 'world' }) + const statusCode = setStatusCode(params.path) + const headers = { + 'content-type': 'application/json;utf=8', + date: new Date().toISOString(), + connection: 'keep-alive', + 'content-length': '17', + 'x-elastic-product': 'Elasticsearch' + } + process.nextTick(resolve, { body, statusCode, headers }) + }) + } +} + +export class MockConnectionTimeout extends BaseConnection { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise<ConnectionRequestResponseAsStream> + async request (params: ConnectionRequestParams, options: any): Promise<any> { + return new Promise((resolve, reject) => { + process.nextTick(reject, new TimeoutError('Request timed out')) + }) + } +} + +export class MockConnectionError extends BaseConnection { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise<ConnectionRequestResponseAsStream> + async request (params: ConnectionRequestParams, options: any): Promise<any> { + return new Promise((resolve, reject) => { + process.nextTick(reject, new ConnectionError('kaboom')) + }) + } +} + +export class MockConnectionSniff extends BaseConnection { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise<ConnectionRequestResponseAsStream> + async request (params: ConnectionRequestParams, options: any): Promise<any> { + return new Promise((resolve, reject) => { + const sniffResult = { + nodes: { + 'node-1': { + http: { + publish_address:
'localhost:9200' + } + }, + 'node-2': { + http: { + publish_address: 'localhost:9201' + } + } + } + } + const body = JSON.stringify(sniffResult) + const statusCode = setStatusCode(params.path) + const headers = { + 'content-type': 'application/json;utf=8', + date: new Date().toISOString(), + connection: 'keep-alive', + 'content-length': '191', + 'x-elastic-product': 'Elasticsearch' + } + if (params.headers?.timeout != null) { + process.nextTick(reject, new TimeoutError('Request timed out')) + } else { + process.nextTick(resolve, { body, statusCode, headers }) + } + }) + } +} + +interface onRequestMock { + onRequest(opts: ConnectionRequestParams): { body: any, statusCode?: number, headers?: http.IncomingHttpHeaders } +} +export function buildMockConnection (opts: onRequestMock) { + assert(opts.onRequest, 'Missing required onRequest option') + + class MockConnection extends BaseConnection { + async request (params: ConnectionRequestParams, options: ConnectionRequestOptions): Promise<ConnectionRequestResponse> + async request (params: ConnectionRequestParams, options: ConnectionRequestOptionsAsStream): Promise<ConnectionRequestResponseAsStream> + async request (params: ConnectionRequestParams, options: any): Promise<any> { + return new Promise((resolve, reject) => { + params.headers = { ...this.headers, ...params.headers } + let { body, statusCode, headers } = opts.onRequest(params) + if (typeof body !== 'string' && !(body instanceof Buffer)) { + body = JSON.stringify(body) + } + statusCode = statusCode || 200 + headers = { + 'content-type': 'application/json;utf=8', + date: new Date().toISOString(), + connection: 'keep-alive', + 'content-length': Buffer.byteLength(body) + '', + 'x-elastic-product': 'Elasticsearch', + ...headers + } + process.nextTick(resolve, { body, statusCode, headers }) + }) + } + } + + return MockConnection +} + +function setStatusCode (path: string): number { + const statusCode = Number(path.slice(1)) + if (Number.isInteger(statusCode)) { + return statusCode + } + return 200 +} diff --git a/test/utils/buildCluster.js b/test/utils/buildCluster.js deleted file mode 100644 index f30c1c241..000000000 --- a/test/utils/buildCluster.js +++ /dev/null @@ -1,95 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const debug = require('debug')('elasticsearch-test') -const workq = require('workq') -const buildServer = require('./buildServer') - -var id = 0 -function buildCluster (options, callback) { - const clusterId = id++ - debug(`Booting cluster '${clusterId}'`) - if (typeof options === 'function') { - callback = options - options = {} - } - - const q = workq() - const nodes = {} - const sniffResult = { nodes: {} } - - options.numberOfNodes = options.numberOfNodes || 4 - for (var i = 0; i < options.numberOfNodes; i++) { - q.add(bootNode, { id: `node${i}` }) - } - - function bootNode (q, opts, done) { - function handler (req, res) { - res.setHeader('content-type', 'application/json') - if (req.url === '/_nodes/_all/http') { - res.end(JSON.stringify(sniffResult)) - } else { - res.end(JSON.stringify({ hello: 'world' })) - } - } - - buildServer(options.handler || handler, ({ port }, server) => { - nodes[opts.id] = { - url: `http://127.0.0.1:${port}`, - server - } - sniffResult.nodes[opts.id] = { - http: { - publish_address: options.hostPublishAddress - ?
`localhost/127.0.0.1:${port}` - : `127.0.0.1:${port}` - }, - roles: ['master', 'data', 'ingest'] - } - debug(`Booted cluster node '${opts.id}' on port ${port} (cluster id: '${clusterId}')`) - done() - }) - } - - function shutdown () { - debug(`Shutting down cluster '${clusterId}'`) - for (const id in nodes) { - kill(id) - } - } - - function kill (id, callback) { - debug(`Shutting down cluster node '${id}' (cluster id: '${clusterId}')`) - const node = nodes[id] - delete nodes[id] - delete sniffResult.nodes[id] - node.server.stop(callback) - } - - function spawn (id, callback) { - debug(`Spawning cluster node '${id}' (cluster id: '${clusterId}')`) - q.add(bootNode, { id }) - q.add((q, done) => { - callback() - done() - }) - } - - const cluster = { - nodes, - shutdown, - kill, - spawn - } - - q.drain(done => { - debug(`Cluster '${clusterId}' booted with ${options.numberOfNodes} nodes`) - callback(cluster) - done() - }) -} - -module.exports = buildCluster diff --git a/test/utils/buildCluster.ts b/test/utils/buildCluster.ts new file mode 100644 index 000000000..5b101f757 --- /dev/null +++ b/test/utils/buildCluster.ts @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import Debug from 'debug' +import * as http from 'node:http' +import buildServer, { ServerHandler } from './buildServer' +import { StoppableServer } from 'stoppable' + +interface BuildClusterOptions { + numberOfNodes?: number + handler?: ServerHandler + hostPublishAddress?: boolean +} + +interface Node { + url: string + server: StoppableServer +} + +interface Cluster { + nodes: Record<string, Node>, + shutdown(): Promise<void>, + kill(id: string): Promise<void>, + spawn(id: string): Promise<void> +} + +interface SniffNode { + http: { + publish_address: string + }, + roles: string[] +} + +type SniffResult = Record<string, SniffNode> + +const debug = Debug('elasticsearch-test') +let id = 0 +export default async function buildCluster (options: BuildClusterOptions): Promise<Cluster> { + const clusterId = id++ + debug(`Booting cluster '${clusterId}'`) + + const cluster: Cluster = { + nodes: {}, + shutdown, + kill, + spawn + } + + options.numberOfNodes = options.numberOfNodes || 4 + for (let i = 0; i < options.numberOfNodes; i++) { + await bootNode(`node${i}`) + } + + async function bootNode (id: string): Promise<void> { + const [{ port }, server] = await buildServer(options.handler ?? handler) + cluster.nodes[id] = { + url: `http://127.0.0.1:${port}`, + server + } + } + + function handler (req: http.IncomingMessage, res: http.ServerResponse): void { + res.setHeader('content-type', 'application/json') + if (req.url === '/_nodes/_all/http') { + const sniffResult: SniffResult = Object.keys(cluster.nodes).reduce((acc: SniffResult, val: string) => { + const node = cluster.nodes[val] + acc[val] = { + http: { + publish_address: options.hostPublishAddress + ?
`localhost/${node.url}` + : node.url + }, + roles: ['master', 'data', 'ingest'] + } + return acc + }, {}) + res.end(JSON.stringify(sniffResult)) + } else { + res.end(JSON.stringify({ hello: 'world' })) + } + } + + async function shutdown (): Promise<void> { + debug(`Shutting down cluster '${clusterId}'`) + for (const id in cluster.nodes) { + await kill(id) + } + } + + async function kill (id: string): Promise<void> { + debug(`Shutting down cluster node '${id}' (cluster id: '${clusterId}')`) + const node = cluster.nodes[id] + delete cluster.nodes[id] + node.server.stop() + } + + async function spawn (id: string): Promise<void> { + debug(`Spawning cluster node '${id}' (cluster id: '${clusterId}')`) + await bootNode(id) + } + + return cluster +} diff --git a/test/utils/buildProxy.ts b/test/utils/buildProxy.ts new file mode 100644 index 000000000..314a08c47 --- /dev/null +++ b/test/utils/buildProxy.ts @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +// @ts-ignore +import proxy from 'proxy' +import { readFileSync } from 'fs' +import { join } from 'path' +import * as http from 'http' +import * as https from 'https' + +export const ssl = { + key: readFileSync(join(__dirname, '..', 'fixtures', 'https.key')), + cert: readFileSync(join(__dirname, '..', 'fixtures', 'https.cert')) +} + +type AuthenticateFn = (err: Error | null, valid: boolean) => void +interface ProxyServer extends http.Server { + authenticate?(req: http.IncomingMessage, fn: AuthenticateFn): void +} + +export function createProxy (): Promise<ProxyServer> { + return new Promise((resolve, reject) => { + const server = proxy(http.createServer()) + server.listen(0, '127.0.0.1', () => { + resolve(server) + }) + }) +} + +export function createSecureProxy (): Promise<ProxyServer> { + return new Promise((resolve, reject) => { + const server = proxy(https.createServer(ssl)) + server.listen(0, '127.0.0.1', () => { + resolve(server) + }) + }) +} + +export function createServer (): Promise<http.Server> { + return new Promise((resolve, reject) => { + const server = http.createServer() + server.listen(0, '127.0.0.1', () => { + resolve(server) + }) + }) +} + +export function createSecureServer (): Promise<https.Server> { + return new Promise((resolve, reject) => { + const server = https.createServer(ssl) + server.listen(0, '127.0.0.1', () => { + resolve(server) + }) + }) +} diff --git a/test/utils/buildServer.js b/test/utils/buildServer.js deleted file mode 100644 index f661509aa..000000000 --- a/test/utils/buildServer.js +++ /dev/null @@ -1,58 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License. -// See the LICENSE file in the project root for more information - -'use strict' - -const debug = require('debug')('elasticsearch-test') -const stoppable = require('stoppable') - -// allow self signed certificates for testing purposes -process.env.NODE_TLS_REJECT_UNAUTHORIZED = 0 - -const { readFileSync } = require('fs') -const { join } = require('path') -const https = require('https') -const http = require('http') - -const secureOpts = { - key: readFileSync(join(__dirname, '..', 'fixtures', 'https.key'), 'utf8'), - cert: readFileSync(join(__dirname, '..', 'fixtures', 'https.cert'), 'utf8') -} - -var id = 0 -function buildServer (handler, opts, cb) { - const serverId = id++ - debug(`Booting server '${serverId}'`) - if (cb == null) { - cb = opts - opts = {} - } - - const server = opts.secure - ?
stoppable(https.createServer(secureOpts)) - : stoppable(http.createServer()) - - server.on('request', handler) - server.on('error', err => { - console.log('http server error', err) - process.exit(1) - }) - if (cb === undefined) { - return new Promise((resolve, reject) => { - server.listen(0, () => { - const port = server.address().port - debug(`Server '${serverId}' booted on port ${port}`) - resolve([Object.assign({}, secureOpts, { port }), server]) - }) - }) - } else { - server.listen(0, () => { - const port = server.address().port - debug(`Server '${serverId}' booted on port ${port}`) - cb(Object.assign({}, secureOpts, { port }), server) - }) - } -} - -module.exports = buildServer diff --git a/test/utils/buildServer.ts b/test/utils/buildServer.ts new file mode 100644 index 000000000..c2fcfc065 --- /dev/null +++ b/test/utils/buildServer.ts @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import { readFileSync } from 'fs' +import crypto from 'crypto' +import { join } from 'path' +import https from 'https' +import http from 'http' +import Debug from 'debug' +import stoppable, { StoppableServer } from 'stoppable' + +const debug = Debug('elasticsearch-test') + +// allow self signed certificates for testing purposes +process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0' + +const secureOpts = { + key: readFileSync(join(__dirname, '..', 'fixtures', 'https.key'), 'utf8'), + cert: readFileSync(join(__dirname, '..', 'fixtures', 'https.cert'), 'utf8') +} + +const caFingerprint = getFingerprint(secureOpts.cert + .split('\n') + .slice(1, -1) + .map(line => line.trim()) + .join('') +) + +export type ServerHandler = (req: http.IncomingMessage, res: http.ServerResponse) => void +interface Options { secure?: boolean } +type Server = [{ key: string, cert: string, port: number, caFingerprint: string }, StoppableServer] + +let id = 0 +export default function buildServer (handler: ServerHandler, opts: Options = {}): Promise<Server> { + const serverId = id++ + debug(`Booting server '${serverId}'`) + + const server = opts.secure + ? stoppable(https.createServer(secureOpts)) + : stoppable(http.createServer()) + + server.on('request', (req, res) => { + res.setHeader('x-elastic-product', 'Elasticsearch') + handler(req, res) + }) + + server.on('error', err => { + console.log('http server error', err) + process.exit(1) + }) + + return new Promise((resolve, reject) => { + server.listen(0, () => { + // @ts-expect-error + const port = server.address().port + debug(`Server '${serverId}' booted on port ${port}`) + resolve([Object.assign({}, secureOpts, { port, caFingerprint }), server]) + }) + }) +} + +function getFingerprint (content: string, inputEncoding = 'base64', outputEncoding = 'hex'): string { + const shasum = crypto.createHash('sha256') + // @ts-expect-error + shasum.update(content, inputEncoding) + // @ts-expect-error + const res = shasum.digest(outputEncoding) + const arr = res.toUpperCase().match(/.{1,2}/g) + if (arr == null) { + throw new Error('Should produce a match') + } + return arr.join(':') +} diff --git a/test/utils/index.js b/test/utils/index.js deleted file mode 100644 index 3b34c7fc4..000000000 --- a/test/utils/index.js +++ /dev/null @@ -1,33 +0,0 @@ -// Licensed to Elasticsearch B.V under one or more agreements. -// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
-// See the LICENSE file in the project root for more information - -'use strict' - -const { promisify } = require('util') -const sleep = promisify(setTimeout) -const buildServer = require('./buildServer') -const buildCluster = require('./buildCluster') -const connection = require('./MockConnection') - -async function waitCluster (client, waitForStatus = 'green', timeout = '50s', times = 0) { - if (!client) { - throw new Error('waitCluster helper: missing client instance') - } - try { - await client.cluster.health({ waitForStatus, timeout }) - } catch (err) { - if (++times < 10) { - await sleep(5000) - return waitCluster(client, waitForStatus, timeout, times) - } - throw err - } -} - -module.exports = { - buildServer, - buildCluster, - connection, - waitCluster -} diff --git a/test/utils/index.ts b/test/utils/index.ts new file mode 100644 index 000000000..6b74fa033 --- /dev/null +++ b/test/utils/index.ts @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +import buildServer from './buildServer' +import * as connection from './MockConnection' +import buildCluster from './buildCluster' +import * as buildProxy from './buildProxy' + +export { + buildServer, + connection, + buildCluster, + buildProxy +} diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 000000000..a7d7a1352 --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,38 @@ +{ + "compilerOptions": { + "target": "ES2019", + "module": "commonjs", + "moduleResolution": "node", + "declaration": true, + "pretty": true, + "noEmitOnError": true, + "strict": true, + "resolveJsonModule": true, + "removeComments": false, + "sourceMap": true, + "newLine": "lf", + "noUnusedLocals": true, + "noFallthroughCasesInSwitch": true, + "useDefineForClassFields": true, + "forceConsistentCasingInFileNames": true, + "skipLibCheck": false, + "esModuleInterop": true, + "isolatedModules": true, + "importHelpers": true, + "outDir": "lib", + "lib": [ + "ES2019", + "dom" + ] + }, + "formatCodeOptions": { + "indentSize": 2, + "tabSize": 2 + }, + "exclude": [ + "node_modules" + ], + "include": [ + "./src/**/*.ts" + ] +}
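
For anyone trying out the new utilities: a minimal sketch of driving the rewritten buildMockConnection helper through the client. The Client import path and its Connection option are assumptions based on how these mocks are consumed by the test suite, not something this diff itself establishes.

import { Client } from '../..' // assumed entry point; adjust to your layout
import { buildMockConnection } from './MockConnection'

// Build a Connection class whose responses are fully scripted
const MockConnection = buildMockConnection({
  onRequest (params) {
    // every request gets a canned 200 echoing the requested path
    return { body: { echo: params.path }, statusCode: 200 }
  }
})

const client = new Client({
  node: 'http://localhost:9200/', // never dialed; the mock intercepts all requests
  Connection: MockConnection
})

client.transport.request({ method: 'GET', path: '/_echo' })
  .then(body => console.log(body)) // { echo: '/_echo' }
  .catch(console.error)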
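Likewise, a short sketch of the promise-based buildServer/buildCluster flow that replaces the old callback style; every name here comes straight from the files above.

import { buildServer, buildCluster } from './index'

async function demo (): Promise<void> {
  // buildServer resolves with [{ key, cert, port, caFingerprint }, server]
  const [{ port, caFingerprint }, server] = await buildServer((req, res) => {
    res.setHeader('content-type', 'application/json')
    res.end(JSON.stringify({ hello: 'world' }))
  })
  console.log(`server on port ${port}, CA fingerprint ${caFingerprint}`)
  server.stop()

  // buildCluster boots numberOfNodes servers that answer the sniff endpoint
  const cluster = await buildCluster({ numberOfNodes: 2 })
  console.log(Object.keys(cluster.nodes)) // [ 'node0', 'node1' ]
  await cluster.shutdown()
}

demo().catch(err => { console.error(err); process.exit(1) })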
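And one for buildProxy, whose helpers resolve once the proxy is listening on an ephemeral port. The address narrowing is only needed because Node's server.address() can also return a string.

import { createProxy } from './buildProxy'

async function proxyDemo (): Promise<void> {
  const proxy = await createProxy()
  const addr = proxy.address()
  if (addr != null && typeof addr !== 'string') {
    console.log(`proxy listening at http://${addr.address}:${addr.port}`)
  }
  proxy.close()
}

proxyDemo().catch(console.error)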